Actual source code: matrix.c
petsc-3.10.5 2019-03-28
2: /*
3: This is where the abstract matrix operations are defined
4: */
6: #include <petsc/private/matimpl.h>
7: #include <petsc/private/isimpl.h>
8: #include <petsc/private/vecimpl.h>
10: /* Logging support */
11: PetscClassId MAT_CLASSID;
12: PetscClassId MAT_COLORING_CLASSID;
13: PetscClassId MAT_FDCOLORING_CLASSID;
14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve,MAT_MatTrSolve;
18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_PartitioningND, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
24: PetscLogEvent MAT_TransposeColoringCreate;
25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
34: PetscLogEvent MAT_GetMultiProcBlock;
35: PetscLogEvent MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch;
36: PetscLogEvent MAT_ViennaCLCopyToGPU;
37: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
38: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;
40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};
42: /*@
43:    MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.
45: Logically Collective on Mat
47: Input Parameters:
48: + x - the matrix
49: -  rctx - the random number context, formed by PetscRandomCreate(), or NULL, in which
50:          case one is created internally.
52: Output Parameter:
53: . x - the matrix
55: Example of Usage:
56: .vb
57: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
58: MatSetRandom(x,rctx);
59: PetscRandomDestroy(rctx);
60: .ve
62: Level: intermediate
64: Concepts: matrix^setting to random
65: Concepts: random^matrix
67: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
68: @*/
69: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
70: {
72: PetscRandom randObj = NULL;
79: if (!x->ops->setrandom) SETERRQ1(PetscObjectComm((PetscObject)x),PETSC_ERR_SUP,"Mat type %s",((PetscObject)x)->type_name);
81: if (!rctx) {
82: MPI_Comm comm;
83: PetscObjectGetComm((PetscObject)x,&comm);
84: PetscRandomCreate(comm,&randObj);
85: PetscRandomSetFromOptions(randObj);
86: rctx = randObj;
87: }
89: PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
90: (*x->ops->setrandom)(x,rctx);
91: PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);
93: x->assembled = PETSC_TRUE;
94: PetscRandomDestroy(&randObj);
95: return(0);
96: }
98: /*@
99: MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in
101: Logically Collective on Mat
103: Input Parameters:
104: . mat - the factored matrix
106: Output Parameter:
107: + pivot - the pivot value computed
108: -  row - the row in which the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and which processes
109:          share the matrix
111: Level: advanced
113: Notes:
114: This routine does not work for factorizations done with external packages.
115:     This routine should only be called if MatFactorGetError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT
117: This can be called on non-factored matrices that come from, for example, matrices used in SOR.
119: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
120: @*/
121: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
122: {
125: *pivot = mat->factorerror_zeropivot_value;
126: *row = mat->factorerror_zeropivot_row;
127: return(0);
128: }
130: /*@
131: MatFactorGetError - gets the error code from a factorization
133: Logically Collective on Mat
135: Input Parameters:
136: . mat - the factored matrix
138: Output Parameter:
139: . err - the error code
141: Level: advanced
143: Notes:
144: This can be called on non-factored matrices that come from, for example, matrices used in SOR.
146: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
147: @*/
148: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
149: {
152: *err = mat->factorerrortype;
153: return(0);
154: }
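/* Illustrative usage sketch (added; not part of the original PETSc source): after a
   numeric factorization fails, query the error type and, for a zero pivot, the offending
   value and row.  The factored matrix F and the reporting choices are assumptions. */
static PetscErrorCode ReportZeroPivot_Example(Mat F)
{
  MatFactorError err;
  PetscErrorCode ierr;

  ierr = MatFactorGetError(F,&err);CHKERRQ(ierr);
  if (err == MAT_FACTOR_NUMERIC_ZEROPIVOT) {
    PetscReal pivot;
    PetscInt  row;
    ierr = MatFactorGetErrorZeroPivot(F,&pivot,&row);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_SELF,"zero pivot %g in (permuted) row %D\n",(double)pivot,row);CHKERRQ(ierr);
    ierr = MatFactorClearError(F);CHKERRQ(ierr);
  }
  return 0;
}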
156: /*@
157: MatFactorClearError - clears the error code in a factorization
159: Logically Collective on Mat
161: Input Parameter:
162: . mat - the factored matrix
164: Level: developer
166: Notes:
167: This can be called on non-factored matrices that come from, for example, matrices used in SOR.
169: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
170: @*/
171: PetscErrorCode MatFactorClearError(Mat mat)
172: {
175: mat->factorerrortype = MAT_FACTOR_NOERROR;
176: mat->factorerror_zeropivot_value = 0.0;
177: mat->factorerror_zeropivot_row = 0;
178: return(0);
179: }
181: PETSC_INTERN PetscErrorCode MatFindNonzeroRowsOrCols_Basic(Mat mat,PetscBool cols,PetscReal tol,IS *nonzero)
182: {
183: PetscErrorCode ierr;
184: Vec r,l;
185: const PetscScalar *al;
186: PetscInt i,nz,gnz,N,n;
189: MatCreateVecs(mat,&r,&l);
190: if (!cols) { /* nonzero rows */
191: MatGetSize(mat,&N,NULL);
192: MatGetLocalSize(mat,&n,NULL);
193: VecSet(l,0.0);
194: VecSetRandom(r,NULL);
195: MatMult(mat,r,l);
196: VecGetArrayRead(l,&al);
197: } else { /* nonzero columns */
198: MatGetSize(mat,NULL,&N);
199: MatGetLocalSize(mat,NULL,&n);
200: VecSet(r,0.0);
201: VecSetRandom(l,NULL);
202: MatMultTranspose(mat,l,r);
203: VecGetArrayRead(r,&al);
204: }
205: if (tol <= 0.0) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nz++; }
206: else { for (i=0,nz=0;i<n;i++) if (PetscAbsScalar(al[i]) > tol) nz++; }
207: MPIU_Allreduce(&nz,&gnz,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)mat));
208: if (gnz != N) {
209: PetscInt *nzr;
210: PetscMalloc1(nz,&nzr);
211: if (nz) {
212: if (tol < 0) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nzr[nz++] = i; }
213: else { for (i=0,nz=0;i<n;i++) if (PetscAbsScalar(al[i]) > tol) nzr[nz++] = i; }
214: }
215: ISCreateGeneral(PetscObjectComm((PetscObject)mat),nz,nzr,PETSC_OWN_POINTER,nonzero);
216: } else *nonzero = NULL;
217: if (!cols) { /* nonzero rows */
218: VecRestoreArrayRead(l,&al);
219: } else {
220: VecRestoreArrayRead(r,&al);
221: }
222: VecDestroy(&l);
223: VecDestroy(&r);
224: return(0);
225: }
227: /*@
228: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
230: Input Parameter:
231: . A - the matrix
233: Output Parameter:
234: . keptrows - the rows that are not completely zero
236: Notes:
237: keptrows is set to NULL if all rows are nonzero.
239: Level: intermediate
241: @*/
242: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
243: {
250: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
251: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
252: if (!mat->ops->findnonzerorows) {
253: MatFindNonzeroRowsOrCols_Basic(mat,PETSC_FALSE,0.0,keptrows);
254: } else {
255: (*mat->ops->findnonzerorows)(mat,keptrows);
256: }
257: return(0);
258: }
260: /*@
261: MatFindZeroRows - Locate all rows that are completely zero in the matrix
263: Input Parameter:
264: . A - the matrix
266: Output Parameter:
267: . zerorows - the rows that are completely zero
269: Notes:
270: zerorows is set to NULL if no rows are zero.
272: Level: intermediate
274: @*/
275: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
276: {
278: IS keptrows;
279: PetscInt m, n;
284: MatFindNonzeroRows(mat, &keptrows);
285: /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
286: In keeping with this convention, we set zerorows to NULL if there are no zero
287: rows. */
288: if (keptrows == NULL) {
289: *zerorows = NULL;
290: } else {
291: MatGetOwnershipRange(mat,&m,&n);
292: ISComplement(keptrows,m,n,zerorows);
293: ISDestroy(&keptrows);
294: }
295: return(0);
296: }
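/* Illustrative usage sketch (added; not part of the original PETSc source): locate the
   rows of an assembled matrix A that contain a nonzero and view them; keptrows is NULL
   when every row has a nonzero entry. */
static PetscErrorCode ViewNonzeroRows_Example(Mat A)
{
  IS             keptrows;
  PetscErrorCode ierr;

  ierr = MatFindNonzeroRows(A,&keptrows);CHKERRQ(ierr);
  if (keptrows) {
    ierr = ISView(keptrows,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = ISDestroy(&keptrows);CHKERRQ(ierr);
  } else {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"all rows contain a nonzero\n");CHKERRQ(ierr);
  }
  return 0;
}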
298: /*@
299: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
301: Not Collective
303: Input Parameters:
304: . A - the matrix
306: Output Parameters:
307: . a - the diagonal part (which is a SEQUENTIAL matrix)
309: Notes:
310: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
311: Use caution, as the reference count on the returned matrix is not incremented and it is used as
312: part of the containing MPI Mat's normal operation.
314: Level: advanced
316: @*/
317: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
318: {
325: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
326: if (!A->ops->getdiagonalblock) {
327: PetscMPIInt size;
328: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
329: if (size == 1) {
330: *a = A;
331: return(0);
332: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Not coded for this matrix type");
333: }
334: (*A->ops->getdiagonalblock)(A,a);
335: return(0);
336: }
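/* Illustrative usage sketch (added; not part of the original PETSc source): obtain the
   on-process (sequential) diagonal block of a parallel matrix and report its size; the
   block is borrowed, so it must not be destroyed by the caller. */
static PetscErrorCode ViewDiagonalBlockSize_Example(Mat A)
{
  Mat            Ad;
  PetscInt       m,n;
  PetscErrorCode ierr;

  ierr = MatGetDiagonalBlock(A,&Ad);CHKERRQ(ierr);
  ierr = MatGetSize(Ad,&m,&n);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"diagonal block is %D x %D\n",m,n);CHKERRQ(ierr);
  return 0;
}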
338: /*@
339:    MatGetTrace - Gets the trace of a matrix, i.e. the sum of the diagonal entries.
341: Collective on Mat
343: Input Parameters:
344: . mat - the matrix
346: Output Parameter:
347: . trace - the sum of the diagonal entries
349: Level: advanced
351: @*/
352: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
353: {
355: Vec diag;
358: MatCreateVecs(mat,&diag,NULL);
359: MatGetDiagonal(mat,diag);
360: VecSum(diag,trace);
361: VecDestroy(&diag);
362: return(0);
363: }
365: /*@
366: MatRealPart - Zeros out the imaginary part of the matrix
368: Logically Collective on Mat
370: Input Parameters:
371: . mat - the matrix
373: Level: advanced
376: .seealso: MatImaginaryPart()
377: @*/
378: PetscErrorCode MatRealPart(Mat mat)
379: {
385: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
386: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
387: if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
388: MatCheckPreallocated(mat,1);
389: (*mat->ops->realpart)(mat);
390: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
391: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
392: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
393: }
394: #endif
395: return(0);
396: }
398: /*@C
399: MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix
401: Collective on Mat
403: Input Parameter:
404: . mat - the matrix
406: Output Parameters:
407: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
408: - ghosts - the global indices of the ghost points
410: Notes:
411:     The nghosts and ghosts values are suitable to pass into VecCreateGhost()
413: Level: advanced
415: @*/
416: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
417: {
423: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
424: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
425: if (!mat->ops->getghosts) {
426: if (nghosts) *nghosts = 0;
427: if (ghosts) *ghosts = 0;
428: } else {
429: (*mat->ops->getghosts)(mat,nghosts,ghosts);
430: }
431: return(0);
432: }
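/* Illustrative usage sketch (added; not part of the original PETSc source): use the ghost
   indices of a parallel matrix to create a compatible ghosted vector, as suggested in the
   notes above.  The local column size is used as the vector's local length. */
static PetscErrorCode CreateGhostedVec_Example(Mat A,Vec *v)
{
  PetscInt       nghosts,nlocal;
  const PetscInt *ghosts;
  PetscErrorCode ierr;

  ierr = MatGetGhosts(A,&nghosts,&ghosts);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,NULL,&nlocal);CHKERRQ(ierr);
  ierr = VecCreateGhost(PetscObjectComm((PetscObject)A),nlocal,PETSC_DECIDE,nghosts,ghosts,v);CHKERRQ(ierr);
  return 0;
}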
435: /*@
436: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
438: Logically Collective on Mat
440: Input Parameters:
441: . mat - the matrix
443: Level: advanced
446: .seealso: MatRealPart()
447: @*/
448: PetscErrorCode MatImaginaryPart(Mat mat)
449: {
455: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
456: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
457: if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
458: MatCheckPreallocated(mat,1);
459: (*mat->ops->imaginarypart)(mat);
460: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
461: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
462: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
463: }
464: #endif
465: return(0);
466: }
468: /*@
469: MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
471: Not Collective
473: Input Parameter:
474: . mat - the matrix
476: Output Parameters:
477: + missing - is any diagonal missing
478: - dd - first diagonal entry that is missing (optional) on this process
480: Level: advanced
483: .seealso: MatRealPart()
484: @*/
485: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
486: {
492: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
493: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
494: if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
495: (*mat->ops->missingdiagonal)(mat,missing,dd);
496: return(0);
497: }
499: /*@C
500: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
501: for each row that you get to ensure that your application does
502: not bleed memory.
504: Not Collective
506: Input Parameters:
507: + mat - the matrix
508: - row - the row to get
510: Output Parameters:
511: + ncols - if not NULL, the number of nonzeros in the row
512: . cols - if not NULL, the column numbers
513: - vals - if not NULL, the values
515: Notes:
516: This routine is provided for people who need to have direct access
517: to the structure of a matrix. We hope that we provide enough
518: high-level matrix routines that few users will need it.
520: MatGetRow() always returns 0-based column indices, regardless of
521: whether the internal representation is 0-based (default) or 1-based.
523: For better efficiency, set cols and/or vals to NULL if you do
524: not wish to extract these quantities.
526: The user can only examine the values extracted with MatGetRow();
527: the values cannot be altered. To change the matrix entries, one
528: must use MatSetValues().
530:    You can only have one call to MatGetRow() outstanding for a particular
531:    matrix at a time, per processor. MatGetRow() can only obtain rows
532:    associated with the given processor; it cannot get rows from the
533:    other processors. For that we suggest using MatCreateSubMatrices(), then
534:    MatGetRow() on the submatrix. The row index passed to MatGetRow()
535:    is in the global numbering of rows.
537: Fortran Notes:
538: The calling sequence from Fortran is
539: .vb
540: MatGetRow(matrix,row,ncols,cols,values,ierr)
541: Mat matrix (input)
542: integer row (input)
543: integer ncols (output)
544: integer cols(maxcols) (output)
545: double precision (or double complex) values(maxcols) output
546: .ve
547: where maxcols >= maximum nonzeros in any row of the matrix.
550: Caution:
551: Do not try to change the contents of the output arrays (cols and vals).
552: In some cases, this may corrupt the matrix.
554: Level: advanced
556: Concepts: matrices^row access
558: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
559: @*/
560: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
561: {
563: PetscInt incols;
568: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
569: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
570: if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
571: MatCheckPreallocated(mat,1);
572: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
573: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
574: if (ncols) *ncols = incols;
575: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
576: return(0);
577: }
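/* Illustrative usage sketch (added; not part of the original PETSc source): read every
   locally owned row of an assembled matrix with MatGetRow(), pairing each call with
   MatRestoreRow() as required above.  The per-row sum printed here is just an example. */
static PetscErrorCode SumRows_Example(Mat A)
{
  PetscInt          rstart,rend,row,ncols,j;
  const PetscInt    *cols;
  const PetscScalar *vals;
  PetscScalar       sum;
  PetscErrorCode    ierr;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (row=rstart; row<rend; row++) {
    ierr = MatGetRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
    for (j=0,sum=0.0; j<ncols; j++) sum += vals[j];
    ierr = PetscPrintf(PETSC_COMM_SELF,"row %D: sum %g\n",row,(double)PetscRealPart(sum));CHKERRQ(ierr);
    ierr = MatRestoreRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
  }
  return 0;
}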
579: /*@
580: MatConjugate - replaces the matrix values with their complex conjugates
582: Logically Collective on Mat
584: Input Parameters:
585: . mat - the matrix
587: Level: advanced
589: .seealso: VecConjugate()
590: @*/
591: PetscErrorCode MatConjugate(Mat mat)
592: {
593: #if defined(PETSC_USE_COMPLEX)
598: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
599: if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
600: (*mat->ops->conjugate)(mat);
601: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
602: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
603: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
604: }
605: #endif
606: return(0);
607: #else
608: return 0;
609: #endif
610: }
612: /*@C
613: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
615: Not Collective
617: Input Parameters:
618: + mat - the matrix
619: . row - the row to get
620: . ncols, cols - the number of nonzeros and their columns
621: - vals - if nonzero the column values
623: Notes:
624: This routine should be called after you have finished examining the entries.
626:     This routine zeros out ncols, cols, and vals. This is to prevent accidental
627:     use of the array after it has been restored. If you pass NULL, it will
628:     not zero the pointers. Use of cols or vals after MatRestoreRow() is invalid.
630: Fortran Notes:
631: The calling sequence from Fortran is
632: .vb
633: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
634: Mat matrix (input)
635: integer row (input)
636: integer ncols (output)
637: integer cols(maxcols) (output)
638: double precision (or double complex) values(maxcols) output
639: .ve
640: Where maxcols >= maximum nonzeros in any row of the matrix.
642: In Fortran MatRestoreRow() MUST be called after MatGetRow()
643: before another call to MatGetRow() can be made.
645: Level: advanced
647: .seealso: MatGetRow()
648: @*/
649: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
650: {
656: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
657: if (!mat->ops->restorerow) return(0);
658: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
659: if (ncols) *ncols = 0;
660: if (cols) *cols = NULL;
661: if (vals) *vals = NULL;
662: return(0);
663: }
665: /*@
666: MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrix in MATSBAIJ format.
667: You should call MatRestoreRowUpperTriangular() after calling MatGetRow/MatRestoreRow() to disable the flag.
669: Not Collective
671:    Input Parameter:
672: .  mat - the matrix
674:    Notes:
675:    The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
677: Level: advanced
679: Concepts: matrices^row access
681: .seealso: MatRestoreRowUpperTriangular()
682: @*/
683: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
684: {
690: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
691: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
692: if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
693: MatCheckPreallocated(mat,1);
694: (*mat->ops->getrowuppertriangular)(mat);
695: return(0);
696: }
698: /*@
699: MatRestoreRowUpperTriangular - Disable calls to MatGetRow() for matrix in MATSBAIJ format.
701: Not Collective
703:    Input Parameter:
704: .  mat - the matrix
706:    Notes:
707:    This routine should be called after you have finished calling MatGetRow()/MatRestoreRow().
710: Level: advanced
712: .seealso: MatGetRowUpperTriangular()
713: @*/
714: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
715: {
720: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
721: if (!mat->ops->restorerowuppertriangular) return(0);
722: (*mat->ops->restorerowuppertriangular)(mat);
723: return(0);
724: }
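/* Illustrative usage sketch (added; not part of the original PETSc source): for a MATSBAIJ
   matrix, bracket MatGetRow()/MatRestoreRow() with the Get/RestoreRowUpperTriangular pair;
   only the upper triangular part of the row is returned. */
static PetscErrorCode CountUpperEntries_Example(Mat S,PetscInt row,PetscInt *nupper)
{
  PetscInt       ncols;
  PetscErrorCode ierr;

  ierr = MatGetRowUpperTriangular(S);CHKERRQ(ierr);
  ierr = MatGetRow(S,row,&ncols,NULL,NULL);CHKERRQ(ierr);
  *nupper = ncols;
  ierr = MatRestoreRow(S,row,&ncols,NULL,NULL);CHKERRQ(ierr);
  ierr = MatRestoreRowUpperTriangular(S);CHKERRQ(ierr);
  return 0;
}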
726: /*@C
727: MatSetOptionsPrefix - Sets the prefix used for searching for all
728: Mat options in the database.
730: Logically Collective on Mat
732:    Input Parameters:
733: + A - the Mat context
734: - prefix - the prefix to prepend to all option names
736: Notes:
737: A hyphen (-) must NOT be given at the beginning of the prefix name.
738: The first character of all runtime options is AUTOMATICALLY the hyphen.
740: Level: advanced
742: .keywords: Mat, set, options, prefix, database
744: .seealso: MatSetFromOptions()
745: @*/
746: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
747: {
752: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
753: return(0);
754: }
756: /*@C
757: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
758: Mat options in the database.
760: Logically Collective on Mat
762: Input Parameters:
763: + A - the Mat context
764: - prefix - the prefix to prepend to all option names
766: Notes:
767: A hyphen (-) must NOT be given at the beginning of the prefix name.
768: The first character of all runtime options is AUTOMATICALLY the hyphen.
770: Level: advanced
772: .keywords: Mat, append, options, prefix, database
774: .seealso: MatGetOptionsPrefix()
775: @*/
776: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
777: {
782: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
783: return(0);
784: }
786: /*@C
787:    MatGetOptionsPrefix - Gets the prefix used for searching for all
788: Mat options in the database.
790: Not Collective
792: Input Parameter:
793: . A - the Mat context
795: Output Parameter:
796: . prefix - pointer to the prefix string used
798: Notes:
799:    On the Fortran side, the user should pass in a string 'prefix' of
800: sufficient length to hold the prefix.
802: Level: advanced
804: .keywords: Mat, get, options, prefix, database
806: .seealso: MatAppendOptionsPrefix()
807: @*/
808: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
809: {
814: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
815: return(0);
816: }
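/* Illustrative usage sketch (added; not part of the original PETSc source): give a matrix
   its own options prefix so that command line options such as -mymat_mat_type apply only
   to it.  The prefix string "mymat_" and the sizes are assumptions for the example. */
static PetscErrorCode CreatePrefixedMat_Example(MPI_Comm comm,Mat *A)
{
  PetscErrorCode ierr;

  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(*A,"mymat_");CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  return 0;
}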
818: /*@
819:    MatResetPreallocation - Reset the matrix to use the original nonzero pattern provided by the user.
821: Collective on Mat
823: Input Parameters:
824: . A - the Mat context
826: Notes:
827:    The allocated memory will be shrunk after calling MatAssemblyBegin()/MatAssemblyEnd() with MAT_FINAL_ASSEMBLY. Users can reset the preallocation to access the original memory.
828:    Currently supported for MPIAIJ and SEQAIJ matrices.
830: Level: beginner
832: .keywords: Mat, ResetPreallocation
834: .seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatXAIJSetPreallocation()
835: @*/
836: PetscErrorCode MatResetPreallocation(Mat A)
837: {
843: PetscUseMethod(A,"MatResetPreallocation_C",(Mat),(A));
844: return(0);
845: }
848: /*@
849:    MatSetUp - Sets up the internal matrix data structures for later use.
851: Collective on Mat
853: Input Parameters:
854: . A - the Mat context
856: Notes:
857: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
859: If a suitable preallocation routine is used, this function does not need to be called.
861: See the Performance chapter of the PETSc users manual for how to preallocate matrices
863: Level: beginner
865: .keywords: Mat, setup
867: .seealso: MatCreate(), MatDestroy()
868: @*/
869: PetscErrorCode MatSetUp(Mat A)
870: {
871: PetscMPIInt size;
876: if (!((PetscObject)A)->type_name) {
877: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
878: if (size == 1) {
879: MatSetType(A, MATSEQAIJ);
880: } else {
881: MatSetType(A, MATMPIAIJ);
882: }
883: }
884: if (!A->preallocated && A->ops->setup) {
885: PetscInfo(A,"Warning not preallocating matrix storage\n");
886: (*A->ops->setup)(A);
887: }
888: PetscLayoutSetUp(A->rmap);
889: PetscLayoutSetUp(A->cmap);
890: A->preallocated = PETSC_TRUE;
891: return(0);
892: }
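/* Illustrative usage sketch (added; not part of the original PETSc source): create a matrix
   without calling a preallocation routine; MatSetUp() installs a default (possibly
   inefficient) preallocation so MatSetValues() can be used right away. */
static PetscErrorCode CreateWithDefaultPreallocation_Example(MPI_Comm comm,PetscInt n,Mat *A)
{
  PetscErrorCode ierr;

  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  ierr = MatSetUp(*A);CHKERRQ(ierr);
  return 0;
}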
894: #if defined(PETSC_HAVE_SAWS)
895: #include <petscviewersaws.h>
896: #endif
897: /*@C
898: MatView - Visualizes a matrix object.
900: Collective on Mat
902: Input Parameters:
903: + mat - the matrix
904: - viewer - visualization context
906: Notes:
907: The available visualization contexts include
908: + PETSC_VIEWER_STDOUT_SELF - for sequential matrices
909: . PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
910: . PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
911: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
913: The user can open alternative visualization contexts with
914: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
915: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
916: specified file; corresponding input uses MatLoad()
917: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
918: an X window display
919: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
920: Currently only the sequential dense and AIJ
921: matrix types support the Socket viewer.
923: The user can call PetscViewerPushFormat() to specify the output
924: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
925: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
926: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
927: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
928: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
929: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
930: format common among all matrix types
931: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
932: format (which is in many cases the same as the default)
933: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
934: size and structure (not the matrix entries)
935: . PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
936: the matrix structure
938: Options Database Keys:
939: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
940: . -mat_view ::ascii_info_detail - Prints more detailed info
941: . -mat_view - Prints matrix in ASCII format
942: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
943: .    -mat_view draw - Draws the nonzero structure of the matrix in an X window, using MatView() and PetscDrawOpenX().
944: . -display <name> - Sets display name (default is host)
945: . -draw_pause <sec> - Sets number of seconds to pause after display
946: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 12 Using MATLAB with PETSc for details)
947: . -viewer_socket_machine <machine> -
948: . -viewer_socket_port <port> -
949: . -mat_view binary - save matrix to file in binary format
950: - -viewer_binary_filename <name> -
951: Level: beginner
953: Notes:
954: see the manual page for MatLoad() for the exact format of the binary file when the binary
955: viewer is used.
957: See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
958: viewer is used.
960: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
961: And then use the following mouse functions:
962: left mouse: zoom in
963: middle mouse: zoom out
964: right mouse: continue with the simulation
966: Concepts: matrices^viewing
967: Concepts: matrices^plotting
968: Concepts: matrices^printing
970: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
971: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
972: @*/
973: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
974: {
975: PetscErrorCode ierr;
976: PetscInt rows,cols,rbs,cbs;
977: PetscBool iascii,ibinary;
978: PetscViewerFormat format;
979: PetscMPIInt size;
980: #if defined(PETSC_HAVE_SAWS)
981: PetscBool issaws;
982: #endif
987: if (!viewer) {
988: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
989: }
992: MatCheckPreallocated(mat,1);
993: PetscViewerGetFormat(viewer,&format);
994: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
995: if (size == 1 && format == PETSC_VIEWER_LOAD_BALANCE) return(0);
996: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
997: if (ibinary) {
998: PetscBool mpiio;
999: PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
1000: if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
1001: }
1003: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
1004: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1005: if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
1006: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
1007: }
1009: #if defined(PETSC_HAVE_SAWS)
1010: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
1011: #endif
1012: if (iascii) {
1013: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1014: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
1015: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1016: MatNullSpace nullsp,transnullsp;
1018: PetscViewerASCIIPushTab(viewer);
1019: MatGetSize(mat,&rows,&cols);
1020: MatGetBlockSizes(mat,&rbs,&cbs);
1021: if (rbs != 1 || cbs != 1) {
1022: if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
1023: else {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
1024: } else {
1025: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
1026: }
1027: if (mat->factortype) {
1028: MatSolverType solver;
1029: MatFactorGetSolverType(mat,&solver);
1030: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
1031: }
1032: if (mat->ops->getinfo) {
1033: MatInfo info;
1034: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
1035: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
1036: PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
1037: }
1038: MatGetNullSpace(mat,&nullsp);
1039: MatGetTransposeNullSpace(mat,&transnullsp);
1040: if (nullsp) {PetscViewerASCIIPrintf(viewer," has attached null space\n");}
1041: if (transnullsp && transnullsp != nullsp) {PetscViewerASCIIPrintf(viewer," has attached transposed null space\n");}
1042: MatGetNearNullSpace(mat,&nullsp);
1043: if (nullsp) {PetscViewerASCIIPrintf(viewer," has attached near null space\n");}
1044: }
1045: #if defined(PETSC_HAVE_SAWS)
1046: } else if (issaws) {
1047: PetscMPIInt rank;
1049: PetscObjectName((PetscObject)mat);
1050: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
1051: if (!((PetscObject)mat)->amsmem && !rank) {
1052: PetscObjectViewSAWs((PetscObject)mat,viewer);
1053: }
1054: #endif
1055: }
1056: if ((format == PETSC_VIEWER_NATIVE || format == PETSC_VIEWER_LOAD_BALANCE) && mat->ops->viewnative) {
1057: PetscViewerASCIIPushTab(viewer);
1058: (*mat->ops->viewnative)(mat,viewer);
1059: PetscViewerASCIIPopTab(viewer);
1060: } else if (mat->ops->view) {
1061: PetscViewerASCIIPushTab(viewer);
1062: (*mat->ops->view)(mat,viewer);
1063: PetscViewerASCIIPopTab(viewer);
1064: }
1065: if (iascii) {
1066: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
1067: PetscViewerGetFormat(viewer,&format);
1068: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1069: PetscViewerASCIIPopTab(viewer);
1070: }
1071: }
1072: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
1073: return(0);
1074: }
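/* Illustrative usage sketch (added; not part of the original PETSc source): print summary
   information about a matrix by pushing the ASCII_INFO format on the stdout viewer before
   calling MatView(), then popping it again. */
static PetscErrorCode ViewMatInfo_Example(Mat A)
{
  PetscViewer    viewer;
  PetscErrorCode ierr;

  ierr = PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)A),&viewer);CHKERRQ(ierr);
  ierr = PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerPopFormat(viewer);CHKERRQ(ierr);
  return 0;
}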
1076: #if defined(PETSC_USE_DEBUG)
1077: #include <../src/sys/totalview/tv_data_display.h>
1078: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1079: {
1080: TV_add_row("Local rows", "int", &mat->rmap->n);
1081: TV_add_row("Local columns", "int", &mat->cmap->n);
1082: TV_add_row("Global rows", "int", &mat->rmap->N);
1083: TV_add_row("Global columns", "int", &mat->cmap->N);
1084: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1085: return TV_format_OK;
1086: }
1087: #endif
1089: /*@C
1090: MatLoad - Loads a matrix that has been stored in binary format
1091: with MatView(). The matrix format is determined from the options database.
1092: Generates a parallel MPI matrix if the communicator has more than one
1093: processor. The default matrix type is AIJ.
1095: Collective on PetscViewer
1097: Input Parameters:
1098: + newmat - the newly loaded matrix, this needs to have been created with MatCreate()
1099: or some related function before a call to MatLoad()
1100: - viewer - binary file viewer, created with PetscViewerBinaryOpen()
1102: Options Database Keys:
1103: .   -matload_block_size <bs> - set the block size used with block matrix formats (MATSEQBAIJ, ...)
1107: Level: beginner
1109: Notes:
1110: If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
1111: Mat before calling this routine if you wish to set it from the options database.
1113: MatLoad() automatically loads into the options database any options
1114: given in the file filename.info where filename is the name of the file
1115: that was passed to the PetscViewerBinaryOpen(). The options in the info
1116: file will be ignored if you use the -viewer_binary_skip_info option.
1118: If the type or size of newmat is not set before a call to MatLoad, PETSc
1119: sets the default matrix type AIJ and sets the local and global sizes.
1120: If type and/or size is already set, then the same are used.
1122: In parallel, each processor can load a subset of rows (or the
1123: entire matrix). This routine is especially useful when a large
1124: matrix is stored on disk and only part of it is desired on each
1125: processor. For example, a parallel solver may access only some of
1126: the rows from each processor. The algorithm used here reads
1127: relatively small blocks of data rather than reading the entire
1128: matrix and then subsetting it.
1130: Notes for advanced users:
1131: Most users should not need to know the details of the binary storage
1132: format, since MatLoad() and MatView() completely hide these details.
1133: But for anyone who's interested, the standard binary matrix storage
1134: format is
1136: $ int MAT_FILE_CLASSID
1137: $ int number of rows
1138: $ int number of columns
1139: $ int total number of nonzeros
1140: $ int *number nonzeros in each row
1141: $ int *column indices of all nonzeros (starting index is zero)
1142: $ PetscScalar *values of all nonzeros
1144: PETSc automatically does the byte swapping for
1145: machines that store the bytes reversed, e.g. DEC alpha, freebsd,
1146: linux, Windows and the paragon; thus if you write your own binary
1147: read/write routines you have to swap the bytes; see PetscBinaryRead()
1148: and PetscBinaryWrite() to see how this may be done.
1150: .keywords: matrix, load, binary, input
1152: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()
1154: @*/
1155: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
1156: {
1158: PetscBool isbinary,flg;
1163: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1164: if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");
1166: if (!((PetscObject)newmat)->type_name) {
1167: MatSetType(newmat,MATAIJ);
1168: }
1170: if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1171: PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1172: (*newmat->ops->load)(newmat,viewer);
1173: PetscLogEventEnd(MAT_Load,viewer,0,0,0);
1175: flg = PETSC_FALSE;
1176: PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1177: if (flg) {
1178: MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1179: MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1180: }
1181: flg = PETSC_FALSE;
1182: PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1183: if (flg) {
1184: MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1185: }
1186: return(0);
1187: }
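/* Illustrative usage sketch (added; not part of the original PETSc source): load a matrix
   from a binary file previously written with MatView().  The file name "matrix.dat" is an
   assumption for the example. */
static PetscErrorCode LoadMatrixFromFile_Example(MPI_Comm comm,Mat *A)
{
  PetscViewer    viewer;
  PetscErrorCode ierr;

  ierr = PetscViewerBinaryOpen(comm,"matrix.dat",FILE_MODE_READ,&viewer);CHKERRQ(ierr);
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  ierr = MatLoad(*A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  return 0;
}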
1189: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1190: {
1192: Mat_Redundant *redund = *redundant;
1193: PetscInt i;
1196: if (redund){
1197: if (redund->matseq) { /* via MatCreateSubMatrices() */
1198: ISDestroy(&redund->isrow);
1199: ISDestroy(&redund->iscol);
1200: MatDestroySubMatrices(1,&redund->matseq);
1201: } else {
1202: PetscFree2(redund->send_rank,redund->recv_rank);
1203: PetscFree(redund->sbuf_j);
1204: PetscFree(redund->sbuf_a);
1205: for (i=0; i<redund->nrecvs; i++) {
1206: PetscFree(redund->rbuf_j[i]);
1207: PetscFree(redund->rbuf_a[i]);
1208: }
1209: PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1210: }
1212: if (redund->subcomm) {
1213: PetscCommDestroy(&redund->subcomm);
1214: }
1215: PetscFree(redund);
1216: }
1217: return(0);
1218: }
1220: /*@
1221: MatDestroy - Frees space taken by a matrix.
1223: Collective on Mat
1225: Input Parameter:
1226: . A - the matrix
1228: Level: beginner
1230: @*/
1231: PetscErrorCode MatDestroy(Mat *A)
1232: {
1236: if (!*A) return(0);
1238: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}
1240: /* if memory was published with SAWs then destroy it */
1241: PetscObjectSAWsViewOff((PetscObject)*A);
1242: if ((*A)->ops->destroy) {
1243: (*(*A)->ops->destroy)(*A);
1244: }
1246: PetscFree((*A)->defaultvectype);
1247: PetscFree((*A)->bsizes);
1248: PetscFree((*A)->solvertype);
1249: MatDestroy_Redundant(&(*A)->redundant);
1250: MatNullSpaceDestroy(&(*A)->nullsp);
1251: MatNullSpaceDestroy(&(*A)->transnullsp);
1252: MatNullSpaceDestroy(&(*A)->nearnullsp);
1253: MatDestroy(&(*A)->schur);
1254: PetscLayoutDestroy(&(*A)->rmap);
1255: PetscLayoutDestroy(&(*A)->cmap);
1256: PetscHeaderDestroy(A);
1257: return(0);
1258: }
1260: /*@C
1261: MatSetValues - Inserts or adds a block of values into a matrix.
1262: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1263: MUST be called after all calls to MatSetValues() have been completed.
1265: Not Collective
1267: Input Parameters:
1268: + mat - the matrix
1269: . v - a logically two-dimensional array of values
1270: . m, idxm - the number of rows and their global indices
1271: . n, idxn - the number of columns and their global indices
1272: - addv - either ADD_VALUES or INSERT_VALUES, where
1273: ADD_VALUES adds values to any existing entries, and
1274: INSERT_VALUES replaces existing entries with new values
1276: Notes:
1277: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1278: MatSetUp() before using this routine
1280: By default the values, v, are row-oriented. See MatSetOption() for other options.
1282: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1283: options cannot be mixed without intervening calls to the assembly
1284: routines.
1286: MatSetValues() uses 0-based row and column numbers in Fortran
1287: as well as in C.
1289:    Negative indices may be passed in idxm and idxn; these rows and columns are
1290:    simply ignored. This allows easily inserting element stiffness matrices
1291:    with homogeneous Dirichlet boundary conditions that you don't want represented
1292: in the matrix.
1294: Efficiency Alert:
1295: The routine MatSetValuesBlocked() may offer much better efficiency
1296: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1298: Level: beginner
1300: Developer Notes:
1301: This is labeled with C so does not automatically generate Fortran stubs and interfaces
1302: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
1304: Concepts: matrices^putting entries in
1306: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1307: InsertMode, INSERT_VALUES, ADD_VALUES
1308: @*/
1309: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1310: {
1312: #if defined(PETSC_USE_DEBUG)
1313: PetscInt i,j;
1314: #endif
1319: if (!m || !n) return(0); /* no values to insert */
1323: MatCheckPreallocated(mat,1);
1324: if (mat->insertmode == NOT_SET_VALUES) {
1325: mat->insertmode = addv;
1326: }
1327: #if defined(PETSC_USE_DEBUG)
1328: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1329: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1330: if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1332: for (i=0; i<m; i++) {
1333: for (j=0; j<n; j++) {
1334: if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1335: #if defined(PETSC_USE_COMPLEX)
1336: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1337: #else
1338: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1339: #endif
1340: }
1341: }
1342: #endif
1344: if (mat->assembled) {
1345: mat->was_assembled = PETSC_TRUE;
1346: mat->assembled = PETSC_FALSE;
1347: }
1348: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1349: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1350: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1351: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1352: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1353: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1354: }
1355: #endif
1356: return(0);
1357: }
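/* Illustrative usage sketch (added; not part of the original PETSc source): fill a
   tridiagonal matrix one row at a time with MatSetValues() and then assemble.  The matrix
   A is assumed to have been created, sized and preallocated (or MatSetUp() called). */
static PetscErrorCode FillTridiagonal_Example(Mat A)
{
  PetscInt       rstart,rend,N,row,ncols,cols[3];
  PetscScalar    vals[3];
  PetscErrorCode ierr;

  ierr = MatGetSize(A,&N,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (row=rstart; row<rend; row++) {
    ncols = 0;
    if (row > 0)   {cols[ncols] = row-1; vals[ncols] = -1.0; ncols++;}
    cols[ncols] = row; vals[ncols] = 2.0; ncols++;
    if (row < N-1) {cols[ncols] = row+1; vals[ncols] = -1.0; ncols++;}
    ierr = MatSetValues(A,1,&row,ncols,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}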
1360: /*@
1361: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1362: values into a matrix
1364: Not Collective
1366: Input Parameters:
1367: + mat - the matrix
1368: . row - the (block) row to set
1369: - v - a logically two-dimensional array of values
1371: Notes:
1372:    By default the values, v, are column-oriented (for the block version) and sorted
1374: All the nonzeros in the row must be provided
1376: The matrix must have previously had its column indices set
1378: The row must belong to this process
1380: Level: intermediate
1382: Concepts: matrices^putting entries in
1384: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1385: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1386: @*/
1387: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1388: {
1390: PetscInt globalrow;
1396: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1397: MatSetValuesRow(mat,globalrow,v);
1398: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1399: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1400: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1401: }
1402: #endif
1403: return(0);
1404: }
1406: /*@
1407: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1408: values into a matrix
1410: Not Collective
1412: Input Parameters:
1413: + mat - the matrix
1414: . row - the (block) row to set
1415: - v - a logically two-dimensional (column major) array of values for block matrices with blocksize larger than one, otherwise a one dimensional array of values
1417: Notes:
1418: The values, v, are column-oriented for the block version.
1420: All the nonzeros in the row must be provided
1422: THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED, usually MatSetValues() is used.
1424: The row must belong to this process
1426: Level: advanced
1428: Concepts: matrices^putting entries in
1430: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1431: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1432: @*/
1433: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1434: {
1440: MatCheckPreallocated(mat,1);
1442: #if defined(PETSC_USE_DEBUG)
1443: if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1444: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1445: #endif
1446: mat->insertmode = INSERT_VALUES;
1448: if (mat->assembled) {
1449: mat->was_assembled = PETSC_TRUE;
1450: mat->assembled = PETSC_FALSE;
1451: }
1452: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1453: if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1454: (*mat->ops->setvaluesrow)(mat,row,v);
1455: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1456: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1457: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1458: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1459: }
1460: #endif
1461: return(0);
1462: }
1464: /*@
1465:    MatSetValuesStencil - Inserts or adds a block of values into a matrix,
1466:    using structured grid indexing.
1468: Not Collective
1470: Input Parameters:
1471: + mat - the matrix
1472: . m - number of rows being entered
1473: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1474: . n - number of columns being entered
1475: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1476: . v - a logically two-dimensional array of values
1477: - addv - either ADD_VALUES or INSERT_VALUES, where
1478: ADD_VALUES adds values to any existing entries, and
1479: INSERT_VALUES replaces existing entries with new values
1481: Notes:
1482: By default the values, v, are row-oriented. See MatSetOption() for other options.
1484: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1485: options cannot be mixed without intervening calls to the assembly
1486: routines.
1488: The grid coordinates are across the entire grid, not just the local portion
1490: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1491: as well as in C.
1493: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1495: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1496: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1498: The columns and rows in the stencil passed in MUST be contained within the
1499: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1500: if you create a DMDA with an overlap of one grid level and on a particular process its first
1501: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1502: first i index you can use in your column and row indices in MatSetStencil() is 5.
1504: In Fortran idxm and idxn should be declared as
1505: $ MatStencil idxm(4,m),idxn(4,n)
1506: and the values inserted using
1507: $ idxm(MatStencil_i,1) = i
1508: $ idxm(MatStencil_j,1) = j
1509: $ idxm(MatStencil_k,1) = k
1510: $ idxm(MatStencil_c,1) = c
1511: etc
1513:    For periodic boundary conditions use negative indices for values to the left (below 0), which are
1514:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1515:    etc., to obtain values wrapped from the left edge. This does not work for anything but the
1516:    DM_BOUNDARY_PERIODIC boundary type.
1518:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1519:    a single value per point) you can skip filling those indices.
1521: Inspired by the structured grid interface to the HYPRE package
1522: (http://www.llnl.gov/CASC/hypre)
1524: Efficiency Alert:
1525: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1526: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1528: Level: beginner
1530: Concepts: matrices^putting entries in
1532: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1533: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1534: @*/
1535: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1536: {
1538: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1539: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1540: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1543: if (!m || !n) return(0); /* no values to insert */
1550: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1551: jdxm = buf; jdxn = buf+m;
1552: } else {
1553: PetscMalloc2(m,&bufm,n,&bufn);
1554: jdxm = bufm; jdxn = bufn;
1555: }
1556: for (i=0; i<m; i++) {
1557: for (j=0; j<3-sdim; j++) dxm++;
1558: tmp = *dxm++ - starts[0];
1559: for (j=0; j<dim-1; j++) {
1560: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1561: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1562: }
1563: if (mat->stencil.noc) dxm++;
1564: jdxm[i] = tmp;
1565: }
1566: for (i=0; i<n; i++) {
1567: for (j=0; j<3-sdim; j++) dxn++;
1568: tmp = *dxn++ - starts[0];
1569: for (j=0; j<dim-1; j++) {
1570: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1571: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1572: }
1573: if (mat->stencil.noc) dxn++;
1574: jdxn[i] = tmp;
1575: }
1576: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1577: PetscFree2(bufm,bufn);
1578: return(0);
1579: }
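/* Illustrative usage sketch (added; not part of the original PETSc source): insert one row
   of a 2d 5-point stencil with MatSetValuesStencil().  The matrix J is assumed to come from
   DMCreateMatrix() on a 2d DMDA and (i,j) to be a locally owned interior grid point; the k
   and c stencil fields are not filled since they are ignored for a 2d scalar problem. */
static PetscErrorCode InsertFivePointRow_Example(Mat J,PetscInt i,PetscInt j)
{
  MatStencil     row,cols[5];
  PetscScalar    vals[5] = {-1.0,-1.0,4.0,-1.0,-1.0};
  PetscErrorCode ierr;

  row.i = i;       row.j = j;
  cols[0].i = i;   cols[0].j = j-1;
  cols[1].i = i-1; cols[1].j = j;
  cols[2].i = i;   cols[2].j = j;
  cols[3].i = i+1; cols[3].j = j;
  cols[4].i = i;   cols[4].j = j+1;
  ierr = MatSetValuesStencil(J,1,&row,5,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  return 0;
}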
1581: /*@
1582:    MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix,
1583:    using structured grid indexing.
1585: Not Collective
1587: Input Parameters:
1588: + mat - the matrix
1589: . m - number of rows being entered
1590: . idxm - grid coordinates for matrix rows being entered
1591: . n - number of columns being entered
1592: . idxn - grid coordinates for matrix columns being entered
1593: . v - a logically two-dimensional array of values
1594: - addv - either ADD_VALUES or INSERT_VALUES, where
1595: ADD_VALUES adds values to any existing entries, and
1596: INSERT_VALUES replaces existing entries with new values
1598: Notes:
1599: By default the values, v, are row-oriented and unsorted.
1600: See MatSetOption() for other options.
1602: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1603: options cannot be mixed without intervening calls to the assembly
1604: routines.
1606: The grid coordinates are across the entire grid, not just the local portion
1608: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1609: as well as in C.
1611: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1613: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1614: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1616: The columns and rows in the stencil passed in MUST be contained within the
1617: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1618: if you create a DMDA with an overlap of one grid level and on a particular process its first
1619: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1620: first i index you can use in your column and row indices in MatSetStencil() is 5.
1622: In Fortran idxm and idxn should be declared as
1623: $ MatStencil idxm(4,m),idxn(4,n)
1624: and the values inserted using
1625: $ idxm(MatStencil_i,1) = i
1626: $ idxm(MatStencil_j,1) = j
1627: $ idxm(MatStencil_k,1) = k
1628: etc
1630:    Negative indices may be passed in idxm and idxn; these rows and columns are
1631:    simply ignored. This allows easily inserting element stiffness matrices
1632:    with homogeneous Dirichlet boundary conditions that you don't want represented
1633: in the matrix.
1635: Inspired by the structured grid interface to the HYPRE package
1636: (http://www.llnl.gov/CASC/hypre)
1638: Level: beginner
1640: Concepts: matrices^putting entries in
1642: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1643: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1644: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1645: @*/
1646: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1647: {
1649: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1650: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1651: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1654: if (!m || !n) return(0); /* no values to insert */
1661: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1662: jdxm = buf; jdxn = buf+m;
1663: } else {
1664: PetscMalloc2(m,&bufm,n,&bufn);
1665: jdxm = bufm; jdxn = bufn;
1666: }
1667: for (i=0; i<m; i++) {
1668: for (j=0; j<3-sdim; j++) dxm++;
1669: tmp = *dxm++ - starts[0];
1670: for (j=0; j<sdim-1; j++) {
1671: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1672: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1673: }
1674: dxm++;
1675: jdxm[i] = tmp;
1676: }
1677: for (i=0; i<n; i++) {
1678: for (j=0; j<3-sdim; j++) dxn++;
1679: tmp = *dxn++ - starts[0];
1680: for (j=0; j<sdim-1; j++) {
1681: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1682: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1683: }
1684: dxn++;
1685: jdxn[i] = tmp;
1686: }
1687: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1688: PetscFree2(bufm,bufn);
1689: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1690: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1691: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1692: }
1693: #endif
1694: return(0);
1695: }
1697: /*@
1698: MatSetStencil - Sets the grid information for setting values into a matrix via
1699: MatSetValuesStencil()
1701: Not Collective
1703: Input Parameters:
1704: + mat - the matrix
1705: . dim - dimension of the grid 1, 2, or 3
1706: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1707: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1708: - dof - number of degrees of freedom per node
1711: Inspired by the structured grid interface to the HYPRE package
1712: (http://www.llnl.gov/CASC/hypre)
1714: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1715: user.
1717: Level: beginner
1719: Concepts: matrices^putting entries in
1721: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1722: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1723: @*/
1724: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1725: {
1726: PetscInt i;
1733: mat->stencil.dim = dim + (dof > 1);
1734: for (i=0; i<dim; i++) {
1735: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1736: mat->stencil.starts[i] = starts[dim-i-1];
1737: }
1738: mat->stencil.dims[dim] = dof;
1739: mat->stencil.starts[dim] = 0;
1740: mat->stencil.noc = (PetscBool)(dof == 1);
1741: return(0);
1742: }
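/*
   A sketch, under stated assumptions, of calling MatSetStencil() by hand for a matrix
   that was NOT obtained from DMCreateMatrix(); dims and starts describe the local
   ghosted grid and ltog is an ISLocalToGlobalMapping, all of them illustrative names.

     PetscInt dims[2]   = {gxm,gym};                ghosted grid points in x and y
     PetscInt starts[2] = {gxs,gys};                first ghosted index in x and y

     MatSetBlockSize(A,dof);
     MatSetLocalToGlobalMapping(A,ltog,ltog);
     MatSetStencil(A,2,dims,starts,dof);
*/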
1744: /*@C
1745: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1747: Not Collective
1749: Input Parameters:
1750: + mat - the matrix
1751: . v - a logically two-dimensional array of values
1752: . m, idxm - the number of block rows and their global block indices
1753: . n, idxn - the number of block columns and their global block indices
1754: - addv - either ADD_VALUES or INSERT_VALUES, where
1755: ADD_VALUES adds values to any existing entries, and
1756: INSERT_VALUES replaces existing entries with new values
1758: Notes:
1759: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1760: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1762: The m and n count the NUMBER of blocks in the row direction and column direction,
1763: NOT the total number of rows/columns; for example, if the block size is 2 and
1764: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1765: The values in idxm would be 1 2; that is the first index for each block divided by
1766: the block size.
1768: Note that you must call MatSetBlockSize() when constructing this matrix (before
1769: preallocating it).
1771: By default the values, v, are row-oriented, so the layout of
1772: v is the same as for MatSetValues(). See MatSetOption() for other options.
1774: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1775: options cannot be mixed without intervening calls to the assembly
1776: routines.
1778: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1779: as well as in C.
1781: Negative indices may be passed in idxm and idxn, these rows and columns are
1782: simply ignored. This allows easily inserting element stiffness matrices
1783: with homogeneous Dirichlet boundary conditions that you don't want represented
1784: in the matrix.
1786: Each time an entry is set within a sparse matrix via MatSetValues(),
1787: internal searching must be done to determine where to place the
1788: data in the matrix storage space. By instead inserting blocks of
1789: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1790: reduced.
1792: Example:
1793: $   Suppose m=n=2 and block size (bs) = 2. The array is
1794: $
1795: $ 1 2 | 3 4
1796: $ 5 6 | 7 8
1797: $ - - - | - - -
1798: $ 9 10 | 11 12
1799: $ 13 14 | 15 16
1800: $
1801: $ v[] should be passed in like
1802: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1803: $
1804: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1805: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
1807: Level: intermediate
1809: Concepts: matrices^putting entries in blocked
1811: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1812: @*/
1813: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1814: {
1820: if (!m || !n) return(0); /* no values to insert */
1824: MatCheckPreallocated(mat,1);
1825: if (mat->insertmode == NOT_SET_VALUES) {
1826: mat->insertmode = addv;
1827: }
1828: #if defined(PETSC_USE_DEBUG)
1829: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1830: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1831: if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1832: #endif
1834: if (mat->assembled) {
1835: mat->was_assembled = PETSC_TRUE;
1836: mat->assembled = PETSC_FALSE;
1837: }
1838: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1839: if (mat->ops->setvaluesblocked) {
1840: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1841: } else {
1842: PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1843: PetscInt i,j,bs,cbs;
1844: MatGetBlockSizes(mat,&bs,&cbs);
1845: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1846: iidxm = buf; iidxn = buf + m*bs;
1847: } else {
1848: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1849: iidxm = bufr; iidxn = bufc;
1850: }
1851: for (i=0; i<m; i++) {
1852: for (j=0; j<bs; j++) {
1853: iidxm[i*bs+j] = bs*idxm[i] + j;
1854: }
1855: }
1856: for (i=0; i<n; i++) {
1857: for (j=0; j<cbs; j++) {
1858: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1859: }
1860: }
1861: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1862: PetscFree2(bufr,bufc);
1863: }
1864: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1865: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
1866: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
1867: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
1868: }
1869: #endif
1870: return(0);
1871: }
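/*
   A small sketch of the blocked example documented above, assuming a matrix A that was
   created with block size 2 and preallocated; the block indices are illustrative.

     PetscInt    idxm[2] = {1,2},idxn[2] = {1,2};   block, not point, indices
     PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};

     MatSetValuesBlocked(A,2,idxm,2,idxn,v,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/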
1873: /*@
1874: MatGetValues - Gets a block of values from a matrix.
1876: Not Collective; currently only returns a local block
1878: Input Parameters:
1879: + mat - the matrix
1880: . v - a logically two-dimensional array for storing the values
1881: . m, idxm - the number of rows and their global indices
1882: - n, idxn - the number of columns and their global indices
1884: Notes:
1885: The user must allocate space (m*n PetscScalars) for the values, v.
1886: The values, v, are then returned in a row-oriented format,
1887: analogous to that used by default in MatSetValues().
1889: MatGetValues() uses 0-based row and column numbers in
1890: Fortran as well as in C.
1892: MatGetValues() requires that the matrix has been assembled
1893: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1894: MatSetValues() and MatGetValues() CANNOT be made in succession
1895: without intermediate matrix assembly.
1897: Negative row or column indices will be ignored and those locations in v[] will be
1898: left unchanged.
1900: Level: advanced
1902: Concepts: matrices^accessing values
1904: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues()
1905: @*/
1906: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1907: {
1913: if (!m || !n) return(0);
1917: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1918: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1919: if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1920: MatCheckPreallocated(mat,1);
1922: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1923: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1924: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1925: return(0);
1926: }
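/*
   A sketch of MatGetValues(): the matrix must already be assembled and only locally
   owned rows may be queried; A and the index values are illustrative.

     PetscInt    rows[2] = {0,1},cols[3] = {0,1,2};
     PetscScalar vals[6];                           2*3 entries, returned row oriented

     MatGetValues(A,2,rows,3,cols,vals);            vals[i*3+j] = A(rows[i],cols[j])
*/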
1928: /*@
1929: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1930: the same size. Currently, this can only be called once and creates the given matrix.
1932: Not Collective
1934: Input Parameters:
1935: + mat - the matrix
1936: . nb - the number of blocks
1937: . bs - the number of rows (and columns) in each block
1938: . rows - a concatenation of the rows for each block
1939: - v - a concatenation of logically two-dimensional arrays of values
1941: Notes:
1942: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
1944: Level: advanced
1946: Concepts: matrices^putting entries in
1948: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1949: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1950: @*/
1951: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1952: {
1960: #if defined(PETSC_USE_DEBUG)
1961: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1962: #endif
1964: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1965: if (mat->ops->setvaluesbatch) {
1966: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1967: } else {
1968: PetscInt b;
1969: for (b = 0; b < nb; ++b) {
1970: MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1971: }
1972: }
1973: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1974: return(0);
1975: }
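/*
   A sketch of MatSetValuesBatch() adding two square 3x3 element matrices at once; as
   the fallback above shows, this is equivalent to one MatSetValues() call with
   ADD_VALUES per block. The arrays are illustrative.

     PetscInt    nb = 2,bs = 3;
     PetscInt    rows[6] = {0,1,2, 2,3,4};          rows (= columns) of each block
     PetscScalar vals[18];                          nb*bs*bs entries filled by the user

     MatSetValuesBatch(A,nb,bs,rows,vals);
*/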
1977: /*@
1978: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1979: the routine MatSetValuesLocal() to allow users to insert matrix entries
1980: using a local (per-processor) numbering.
1982: Not Collective
1984: Input Parameters:
1985: + x - the matrix
1986: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or ISLocalToGlobalMappingCreateIS()
1987: - cmapping - column mapping
1989: Level: intermediate
1991: Concepts: matrices^local to global mapping
1992: Concepts: local to global mapping^for matrices
1994: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1995: @*/
1996: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1997: {
2006: if (x->ops->setlocaltoglobalmapping) {
2007: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
2008: } else {
2009: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
2010: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
2011: }
2012: return(0);
2013: }
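/*
   A sketch of attaching a local-to-global mapping so that MatSetValuesLocal() can be
   used afterwards; 'globals' holds the global index of each local (including ghost)
   node and, like nlocal and A, is an illustrative name.

     ISLocalToGlobalMapping ltog;

     ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,nlocal,globals,PETSC_COPY_VALUES,&ltog);
     MatSetLocalToGlobalMapping(A,ltog,ltog);
     ISLocalToGlobalMappingDestroy(&ltog);
*/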
2016: /*@
2017: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
2019: Not Collective
2021: Input Parameters:
2022: . A - the matrix
2024: Output Parameters:
2025: + rmapping - row mapping
2026: - cmapping - column mapping
2028: Level: advanced
2030: Concepts: matrices^local to global mapping
2031: Concepts: local to global mapping^for matrices
2033: .seealso: MatSetValuesLocal()
2034: @*/
2035: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
2036: {
2042: if (rmapping) *rmapping = A->rmap->mapping;
2043: if (cmapping) *cmapping = A->cmap->mapping;
2044: return(0);
2045: }
2047: /*@
2048: MatGetLayouts - Gets the PetscLayout objects for rows and columns
2050: Not Collective
2052: Input Parameters:
2053: . A - the matrix
2055: Output Parameters:
2056: + rmap - row layout
2057: - cmap - column layout
2059: Level: advanced
2061: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping()
2062: @*/
2063: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2064: {
2070: if (rmap) *rmap = A->rmap;
2071: if (cmap) *cmap = A->cmap;
2072: return(0);
2073: }
2075: /*@C
2076: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2077: using a local ordering of the nodes.
2079: Not Collective
2081: Input Parameters:
2082: + mat - the matrix
2083: . nrow, irow - number of rows and their local indices
2084: . ncol, icol - number of columns and their local indices
2085: . y - a logically two-dimensional array of values
2086: - addv - either INSERT_VALUES or ADD_VALUES, where
2087: ADD_VALUES adds values to any existing entries, and
2088: INSERT_VALUES replaces existing entries with new values
2090: Notes:
2091: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2092: MatSetUp() before using this routine
2094: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine
2096: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2097: options cannot be mixed without intervening calls to the assembly
2098: routines.
2100: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2101: MUST be called after all calls to MatSetValuesLocal() have been completed.
2103: Level: intermediate
2105: Concepts: matrices^putting entries in with local numbering
2107: Developer Notes:
2108: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2109: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2111: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2112: MatSetValueLocal()
2113: @*/
2114: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2115: {
2121: MatCheckPreallocated(mat,1);
2122: if (!nrow || !ncol) return(0); /* no values to insert */
2126: if (mat->insertmode == NOT_SET_VALUES) {
2127: mat->insertmode = addv;
2128: }
2129: #if defined(PETSC_USE_DEBUG)
2130: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2131: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2132: if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2133: #endif
2135: if (mat->assembled) {
2136: mat->was_assembled = PETSC_TRUE;
2137: mat->assembled = PETSC_FALSE;
2138: }
2139: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2140: if (mat->ops->setvalueslocal) {
2141: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2142: } else {
2143: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2144: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2145: irowm = buf; icolm = buf+nrow;
2146: } else {
2147: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2148: irowm = bufr; icolm = bufc;
2149: }
2150: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2151: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2152: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2153: PetscFree2(bufr,bufc);
2154: }
2155: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2156: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
2157: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2158: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2159: }
2160: #endif
2161: return(0);
2162: }
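/*
   A sketch of inserting entries by local index once a local-to-global mapping has been
   set on A (see MatSetLocalToGlobalMapping() above); the index values are illustrative.

     PetscInt    lrow = 0,lcols[3] = {0,1,2};
     PetscScalar vals[3] = {2.0,-1.0,-1.0};

     MatSetValuesLocal(A,1,&lrow,3,lcols,vals,ADD_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/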
2164: /*@C
2165: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2166: using a local ordering of the nodes a block at a time.
2168: Not Collective
2170: Input Parameters:
2171: + x - the matrix
2172: . nrow, irow - number of rows and their local indices
2173: . ncol, icol - number of columns and their local indices
2174: . y - a logically two-dimensional array of values
2175: - addv - either INSERT_VALUES or ADD_VALUES, where
2176: ADD_VALUES adds values to any existing entries, and
2177: INSERT_VALUES replaces existing entries with new values
2179: Notes:
2180: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2181: MatSetUp() before using this routine
2183: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2184: before using this routine.
2186: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2187: options cannot be mixed without intervening calls to the assembly
2188: routines.
2190: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2191: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
2193: Level: intermediate
2195: Developer Notes:
2196: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2197: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2199: Concepts: matrices^putting blocked values in with local numbering
2201: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2202: MatSetValuesLocal(), MatSetValuesBlocked()
2203: @*/
2204: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2205: {
2211: MatCheckPreallocated(mat,1);
2212: if (!nrow || !ncol) return(0); /* no values to insert */
2216: if (mat->insertmode == NOT_SET_VALUES) {
2217: mat->insertmode = addv;
2218: }
2219: #if defined(PETSC_USE_DEBUG)
2220: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2221: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2222: if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2223: #endif
2225: if (mat->assembled) {
2226: mat->was_assembled = PETSC_TRUE;
2227: mat->assembled = PETSC_FALSE;
2228: }
2229: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2230: if (mat->ops->setvaluesblockedlocal) {
2231: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2232: } else {
2233: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2234: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2235: irowm = buf; icolm = buf + nrow;
2236: } else {
2237: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2238: irowm = bufr; icolm = bufc;
2239: }
2240: ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2241: ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2242: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2243: PetscFree2(bufr,bufc);
2244: }
2245: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2246: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
2247: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
2248: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
2249: }
2250: #endif
2251: return(0);
2252: }
2254: /*@
2255: MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal
2257: Collective on Mat and Vec
2259: Input Parameters:
2260: + mat - the matrix
2261: - x - the vector to be multiplied
2263: Output Parameters:
2264: . y - the result
2266: Notes:
2267: The vectors x and y cannot be the same. I.e., one cannot
2268: call MatMultDiagonalBlock(A,y,y).
2270: Level: developer
2272: Concepts: matrix-vector product
2274: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2275: @*/
2276: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2277: {
2286: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2287: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2288: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2289: MatCheckPreallocated(mat,1);
2291: if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2292: (*mat->ops->multdiagonalblock)(mat,x,y);
2293: PetscObjectStateIncrease((PetscObject)y);
2294: return(0);
2295: }
2297: /* --------------------------------------------------------*/
2298: /*@
2299: MatMult - Computes the matrix-vector product, y = Ax.
2301: Neighbor-wise Collective on Mat and Vec
2303: Input Parameters:
2304: + mat - the matrix
2305: - x - the vector to be multiplied
2307: Output Parameters:
2308: . y - the result
2310: Notes:
2311: The vectors x and y cannot be the same. I.e., one cannot
2312: call MatMult(A,y,y).
2314: Level: beginner
2316: Concepts: matrix-vector product
2318: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2319: @*/
2320: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2321: {
2329: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2330: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2331: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2332: #if !defined(PETSC_HAVE_CONSTRAINTS)
2333: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2334: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2335: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2336: #endif
2337: VecLocked(y,3);
2338: if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2339: MatCheckPreallocated(mat,1);
2341: VecLockPush(x);
2342: if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2343: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2344: (*mat->ops->mult)(mat,x,y);
2345: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2346: if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2347: VecLockPop(x);
2348: return(0);
2349: }
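/*
   A sketch of a matrix-vector product using vectors obtained from the matrix itself so
   the layouts are guaranteed to be compatible; A is an assembled, illustrative matrix.

     Vec x,y;

     MatCreateVecs(A,&x,&y);                        x matches the columns, y the rows
     VecSet(x,1.0);
     MatMult(A,x,y);                                y = A x
     VecDestroy(&x);
     VecDestroy(&y);
*/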
2351: /*@
2352: MatMultTranspose - Computes matrix transpose times a vector y = A^T * x.
2354: Neighbor-wise Collective on Mat and Vec
2356: Input Parameters:
2357: + mat - the matrix
2358: - x - the vector to be multiplied
2360: Output Parameters:
2361: . y - the result
2363: Notes:
2364: The vectors x and y cannot be the same. I.e., one cannot
2365: call MatMultTranspose(A,y,y).
2367: For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose product;
2368: use MatMultHermitianTranspose()
2370: Level: beginner
2372: Concepts: matrix vector product^transpose
2374: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2375: @*/
2376: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2377: {
2386: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2387: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2388: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2389: #if !defined(PETSC_HAVE_CONSTRAINTS)
2390: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2391: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2392: #endif
2393: if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2394: MatCheckPreallocated(mat,1);
2396: if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2397: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2398: VecLockPush(x);
2399: (*mat->ops->multtranspose)(mat,x,y);
2400: VecLockPop(x);
2401: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2402: PetscObjectStateIncrease((PetscObject)y);
2403: if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2404: return(0);
2405: }
2407: /*@
2408: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2410: Neighbor-wise Collective on Mat and Vec
2412: Input Parameters:
2413: + mat - the matrix
2414: - x - the vector to be multiplied
2416: Output Parameters:
2417: . y - the result
2419: Notes:
2420: The vectors x and y cannot be the same. I.e., one cannot
2421: call MatMultHermitianTranspose(A,y,y).
2423: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2425: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2427: Level: beginner
2429: Concepts: matrix vector product^transpose
2431: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2432: @*/
2433: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2434: {
2436: Vec w;
2444: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2445: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2446: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2447: #if !defined(PETSC_HAVE_CONSTRAINTS)
2448: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2449: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2450: #endif
2451: MatCheckPreallocated(mat,1);
2453: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2454: if (mat->ops->multhermitiantranspose) {
2455: VecLockPush(x);
2456: (*mat->ops->multhermitiantranspose)(mat,x,y);
2457: VecLockPop(x);
2458: } else {
2459: VecDuplicate(x,&w);
2460: VecCopy(x,w);
2461: VecConjugate(w);
2462: MatMultTranspose(mat,w,y);
2463: VecDestroy(&w);
2464: VecConjugate(y);
2465: }
2466: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2467: PetscObjectStateIncrease((PetscObject)y);
2468: return(0);
2469: }
2471: /*@
2472: MatMultAdd - Computes v3 = v2 + A * v1.
2474: Neighbor-wise Collective on Mat and Vec
2476: Input Parameters:
2477: + mat - the matrix
2478: - v1, v2 - the vectors
2480: Output Parameters:
2481: . v3 - the result
2483: Notes:
2484: The vectors v1 and v3 cannot be the same. I.e., one cannot
2485: call MatMultAdd(A,v1,v2,v1).
2487: Level: beginner
2489: Concepts: matrix vector product^addition
2491: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2492: @*/
2493: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2494: {
2504: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2505: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2506: if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2507: /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2508: if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2509: if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2510: if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2511: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2512: MatCheckPreallocated(mat,1);
2514: if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2515: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2516: VecLockPush(v1);
2517: (*mat->ops->multadd)(mat,v1,v2,v3);
2518: VecLockPop(v1);
2519: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2520: PetscObjectStateIncrease((PetscObject)v3);
2521: return(0);
2522: }
2524: /*@
2525: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2527: Neighbor-wise Collective on Mat and Vec
2529: Input Parameters:
2530: + mat - the matrix
2531: - v1, v2 - the vectors
2533: Output Parameters:
2534: . v3 - the result
2536: Notes:
2537: The vectors v1 and v3 cannot be the same. I.e., one cannot
2538: call MatMultTransposeAdd(A,v1,v2,v1).
2540: Level: beginner
2542: Concepts: matrix vector product^transpose and addition
2544: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2545: @*/
2546: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2547: {
2557: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2558: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2559: if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2560: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2561: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2562: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2563: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2564: MatCheckPreallocated(mat,1);
2566: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2567: VecLockPush(v1);
2568: (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2569: VecLockPop(v1);
2570: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2571: PetscObjectStateIncrease((PetscObject)v3);
2572: return(0);
2573: }
2575: /*@
2576: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2578: Neighbor-wise Collective on Mat and Vec
2580: Input Parameters:
2581: + mat - the matrix
2582: - v1, v2 - the vectors
2584: Output Parameters:
2585: . v3 - the result
2587: Notes:
2588: The vectors v1 and v3 cannot be the same. I.e., one cannot
2589: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2591: Level: beginner
2593: Concepts: matrix vector product^transpose and addition
2595: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2596: @*/
2597: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2598: {
2608: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2609: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2610: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2611: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2612: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2613: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2614: MatCheckPreallocated(mat,1);
2616: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2617: VecLockPush(v1);
2618: if (mat->ops->multhermitiantransposeadd) {
2619: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2620: } else {
2621: Vec w,z;
2622: VecDuplicate(v1,&w);
2623: VecCopy(v1,w);
2624: VecConjugate(w);
2625: VecDuplicate(v3,&z);
2626: MatMultTranspose(mat,w,z);
2627: VecDestroy(&w);
2628: VecConjugate(z);
2629: VecWAXPY(v3,1.0,v2,z);
2630: VecDestroy(&z);
2631: }
2632: VecLockPop(v1);
2633: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2634: PetscObjectStateIncrease((PetscObject)v3);
2635: return(0);
2636: }
2638: /*@
2639: MatMultConstrained - The inner multiplication routine for a
2640: constrained matrix P^T A P.
2642: Neighbor-wise Collective on Mat and Vec
2644: Input Parameters:
2645: + mat - the matrix
2646: - x - the vector to be multiplied
2648: Output Parameters:
2649: . y - the result
2651: Notes:
2652: The vectors x and y cannot be the same. I.e., one cannot
2653: call MatMultConstrained(A,y,y).
2655: Level: beginner
2657: .keywords: matrix, multiply, matrix-vector product, constraint
2658: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2659: @*/
2660: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2661: {
2668: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2669: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2670: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2671: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2672: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2673: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2675: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2676: VecLockPush(x);
2677: (*mat->ops->multconstrained)(mat,x,y);
2678: VecLockPop(x);
2679: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2680: PetscObjectStateIncrease((PetscObject)y);
2681: return(0);
2682: }
2684: /*@
2685: MatMultTransposeConstrained - The inner multiplication routine for a
2686: constrained matrix P^T A^T P.
2688: Neighbor-wise Collective on Mat and Vec
2690: Input Parameters:
2691: + mat - the matrix
2692: - x - the vector to be multiplied
2694: Output Parameters:
2695: . y - the result
2697: Notes:
2698: The vectors x and y cannot be the same. I.e., one cannot
2699: call MatMultTransposeConstrained(A,y,y).
2701: Level: beginner
2703: .keywords: matrix, multiply, matrix-vector product, constraint
2704: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2705: @*/
2706: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2707: {
2714: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2715: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2716: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2717: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2718: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2720: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2721: (*mat->ops->multtransposeconstrained)(mat,x,y);
2722: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2723: PetscObjectStateIncrease((PetscObject)y);
2724: return(0);
2725: }
2727: /*@C
2728: MatGetFactorType - gets the type of factorization of the matrix
2730: Not Collective
2733: Input Parameters:
2734: . mat - the matrix
2736: Output Parameters:
2737: . t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT
2739: Level: intermediate
2741: .seealso: MatFactorType, MatGetFactor()
2742: @*/
2743: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2744: {
2748: *t = mat->factortype;
2749: return(0);
2750: }
2752: /* ------------------------------------------------------------*/
2753: /*@C
2754: MatGetInfo - Returns information about matrix storage (number of
2755: nonzeros, memory, etc.).
2757: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2759: Input Parameters:
2760: + mat - the matrix
2762: - flag - flag indicating the type of parameters to be returned
2763: (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2764: MAT_GLOBAL_SUM - sum over all processors)
2765: Output Parameter:
2766: . info - matrix information context
2768: Notes:
2769: The MatInfo context contains a variety of matrix data, including
2770: number of nonzeros allocated and used, number of mallocs during
2771: matrix assembly, etc. Additional information for factored matrices
2772: is provided (such as the fill ratio, number of mallocs during
2773: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2774: when using the runtime options
2775: $ -info -mat_view ::ascii_info
2777: Example for C/C++ Users:
2778: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2779: data within the MatInfo context. For example,
2780: .vb
2781: MatInfo info;
2782: Mat A;
2783: double mal, nz_a, nz_u;
2785: MatGetInfo(A,MAT_LOCAL,&info);
2786: mal = info.mallocs;
2787: nz_a = info.nz_allocated;
2788: .ve
2790: Example for Fortran Users:
2791: Fortran users should declare info as a double precision
2792: array of dimension MAT_INFO_SIZE, and then extract the parameters
2793: of interest. See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2794: for a complete list of parameter names.
2795: .vb
2796: double precision info(MAT_INFO_SIZE)
2797: double precision mal, nz_a
2798: Mat A
2799: integer ierr
2801: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2802: mal = info(MAT_INFO_MALLOCS)
2803: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2804: .ve
2806: Level: intermediate
2808: Concepts: matrices^getting information on
2810: Developer Note: fortran interface is not autogenerated as the f90
2811: interface definition cannot be generated correctly [due to MatInfo]
2813: .seealso: MatStashGetInfo()
2815: @*/
2816: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2817: {
2824: if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2825: MatCheckPreallocated(mat,1);
2826: (*mat->ops->getinfo)(mat,flag,info);
2827: return(0);
2828: }
2830: /*
2831: This is used by external packages where it is not easy to get the info from the actual
2832: matrix factorization.
2833: */
2834: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2835: {
2839: PetscMemzero(info,sizeof(MatInfo));
2840: return(0);
2841: }
2843: /* ----------------------------------------------------------*/
2845: /*@C
2846: MatLUFactor - Performs in-place LU factorization of a matrix.
2848: Collective on Mat
2850: Input Parameters:
2851: + mat - the matrix
2852: . row - row permutation
2853: . col - column permutation
2854: - info - options for factorization, includes
2855: $ fill - expected fill as ratio of original fill.
2856: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2857: $ Run with the option -info to determine an optimal value to use
2859: Notes:
2860: Most users should employ the simplified KSP interface for linear solvers
2861: instead of working directly with matrix algebra routines such as this.
2862: See, e.g., KSPCreate().
2864: This changes the state of the matrix to a factored matrix; it cannot be used
2865: for example with MatSetValues() unless one first calls MatSetUnfactored().
2867: Level: developer
2869: Concepts: matrices^LU factorization
2871: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2872: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2874: Developer Note: fortran interface is not autogenerated as the f90
2875: interface definition cannot be generated correctly [due to MatFactorInfo]
2877: @*/
2878: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2879: {
2881: MatFactorInfo tinfo;
2889: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2890: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2891: if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2892: MatCheckPreallocated(mat,1);
2893: if (!info) {
2894: MatFactorInfoInitialize(&tinfo);
2895: info = &tinfo;
2896: }
2898: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2899: (*mat->ops->lufactor)(mat,row,col,info);
2900: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2901: PetscObjectStateIncrease((PetscObject)mat);
2902: return(0);
2903: }
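/*
   A sketch of an in-place LU factorization followed by a triangular solve; the ordering
   choice is illustrative and most users should go through KSP instead.

     Vec           b,x;
     IS            rperm,cperm;
     MatFactorInfo info;

     MatCreateVecs(A,&x,&b);
     VecSet(b,1.0);
     MatFactorInfoInitialize(&info);
     MatGetOrdering(A,MATORDERINGNATURAL,&rperm,&cperm);
     MatLUFactor(A,rperm,cperm,&info);              A now holds its own factors
     MatSolve(A,b,x);                               solve A x = b with those factors
     ISDestroy(&rperm); ISDestroy(&cperm);
     VecDestroy(&b); VecDestroy(&x);
*/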
2905: /*@C
2906: MatILUFactor - Performs in-place ILU factorization of a matrix.
2908: Collective on Mat
2910: Input Parameters:
2911: + mat - the matrix
2912: . row - row permutation
2913: . col - column permutation
2914: - info - structure containing
2915: $ levels - number of levels of fill.
2916: $ expected fill - as ratio of original fill.
2917: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2918: missing diagonal entries)
2920: Notes:
2921: Probably really in-place only when level of fill is zero, otherwise allocates
2922: new space to store factored matrix and deletes previous memory.
2924: Most users should employ the simplified KSP interface for linear solvers
2925: instead of working directly with matrix algebra routines such as this.
2926: See, e.g., KSPCreate().
2928: Level: developer
2930: Concepts: matrices^ILU factorization
2932: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2934: Developer Note: fortran interface is not autogenerated as the f90
2935: interface definition cannot be generated correctly [due to MatFactorInfo]
2937: @*/
2938: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2939: {
2948: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2949: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2950: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2951: if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2952: MatCheckPreallocated(mat,1);
2954: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2955: (*mat->ops->ilufactor)(mat,row,col,info);
2956: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2957: PetscObjectStateIncrease((PetscObject)mat);
2958: return(0);
2959: }
2961: /*@C
2962: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2963: Call this routine before calling MatLUFactorNumeric().
2965: Collective on Mat
2967: Input Parameters:
2968: + fact - the factor matrix obtained with MatGetFactor()
2969: . mat - the matrix
2970: . row, col - row and column permutations
2971: - info - options for factorization, includes
2972: $ fill - expected fill as ratio of original fill.
2973: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2974: $ Run with the option -info to determine an optimal value to use
2977: Notes:
2978: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
2980: Most users should employ the simplified KSP interface for linear solvers
2981: instead of working directly with matrix algebra routines such as this.
2982: See, e.g., KSPCreate().
2984: Level: developer
2986: Concepts: matrices^LU symbolic factorization
2988: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()
2990: Developer Note: fortran interface is not autogenerated as the f90
2991: interface definition cannot be generated correctly [due to MatFactorInfo]
2993: @*/
2994: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2995: {
3005: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3006: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3007: if (!(fact)->ops->lufactorsymbolic) {
3008: MatSolverType spackage;
3009: MatFactorGetSolverType(fact,&spackage);
3010: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
3011: }
3012: MatCheckPreallocated(mat,2);
3014: PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
3015: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
3016: PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
3017: PetscObjectStateIncrease((PetscObject)fact);
3018: return(0);
3019: }
3021: /*@C
3022: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
3023: Call this routine after first calling MatLUFactorSymbolic().
3025: Collective on Mat
3027: Input Parameters:
3028: + fact - the factor matrix obtained with MatGetFactor()
3029: . mat - the matrix
3030: - info - options for factorization
3032: Notes:
3033: See MatLUFactor() for in-place factorization. See
3034: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
3036: Most users should employ the simplified KSP interface for linear solvers
3037: instead of working directly with matrix algebra routines such as this.
3038: See, e.g., KSPCreate().
3040: Level: developer
3042: Concepts: matrices^LU numeric factorization
3044: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
3046: Developer Note: fortran interface is not autogenerated as the f90
3047: interface definition cannot be generated correctly [due to MatFactorInfo]
3049: @*/
3050: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3051: {
3059: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3060: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3062: if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
3063: MatCheckPreallocated(mat,2);
3064: PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
3065: (fact->ops->lufactornumeric)(fact,mat,info);
3066: PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
3067: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3068: PetscObjectStateIncrease((PetscObject)fact);
3069: return(0);
3070: }
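/*
   A sketch of the out-of-place symbolic/numeric factorization sequence these routines
   implement; the solver package and ordering are illustrative choices, and b and x are
   vectors as in the in-place example after MatLUFactor().

     Mat           F;
     IS            rperm,cperm;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetOrdering(A,MATORDERINGND,&rperm,&cperm);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatLUFactorSymbolic(F,A,rperm,cperm,&info);    fill pattern only
     MatLUFactorNumeric(F,A,&info);                 actual numerical factors
     MatSolve(F,b,x);
     MatDestroy(&F);
     ISDestroy(&rperm); ISDestroy(&cperm);
*/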
3072: /*@C
3073: MatCholeskyFactor - Performs in-place Cholesky factorization of a
3074: symmetric matrix.
3076: Collective on Mat
3078: Input Parameters:
3079: + mat - the matrix
3080: . perm - row and column permutations
3081: - f - expected fill as ratio of original fill
3083: Notes:
3084: See MatLUFactor() for the nonsymmetric case. See also
3085: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
3087: Most users should employ the simplified KSP interface for linear solvers
3088: instead of working directly with matrix algebra routines such as this.
3089: See, e.g., KSPCreate().
3091: Level: developer
3093: Concepts: matrices^Cholesky factorization
3095: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3096: MatGetOrdering()
3098: Developer Note: fortran interface is not autogenerated as the f90
3099: interface definition cannot be generated correctly [due to MatFactorInfo]
3101: @*/
3102: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3103: {
3111: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3112: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3113: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3114: if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"In-place factorization for Mat type %s is not supported, try out-of-place factorization. See MatCholeskyFactorSymbolic/Numeric",((PetscObject)mat)->type_name);
3115: MatCheckPreallocated(mat,1);
3117: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3118: (*mat->ops->choleskyfactor)(mat,perm,info);
3119: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3120: PetscObjectStateIncrease((PetscObject)mat);
3121: return(0);
3122: }
3124: /*@C
3125: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3126: of a symmetric matrix.
3128: Collective on Mat
3130: Input Parameters:
3131: + fact - the factor matrix obtained with MatGetFactor()
3132: . mat - the matrix
3133: . perm - row and column permutations
3134: - info - options for factorization, includes
3135: $ fill - expected fill as ratio of original fill.
3136: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3137: $ Run with the option -info to determine an optimal value to use
3139: Notes:
3140: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3141: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3143: Most users should employ the simplified KSP interface for linear solvers
3144: instead of working directly with matrix algebra routines such as this.
3145: See, e.g., KSPCreate().
3147: Level: developer
3149: Concepts: matrices^Cholesky symbolic factorization
3151: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3152: MatGetOrdering()
3154: Developer Note: fortran interface is not autogenerated as the f90
3155: interface definition cannot be generated correctly [due to MatFactorInfo]
3157: @*/
3158: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3159: {
3168: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3169: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3170: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3171: if (!(fact)->ops->choleskyfactorsymbolic) {
3172: MatSolverType spackage;
3173: MatFactorGetSolverType(fact,&spackage);
3174: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3175: }
3176: MatCheckPreallocated(mat,2);
3178: PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3179: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3180: PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3181: PetscObjectStateIncrease((PetscObject)fact);
3182: return(0);
3183: }
3185: /*@C
3186: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3187: of a symmetric matrix. Call this routine after first calling
3188: MatCholeskyFactorSymbolic().
3190: Collective on Mat
3192: Input Parameters:
3193: + fact - the factor matrix obtained with MatGetFactor(), holding the symbolic factor of mat
3194: . mat - the initial matrix
3195: - info - options for factorization
3199: Notes:
3200: Most users should employ the simplified KSP interface for linear solvers
3201: instead of working directly with matrix algebra routines such as this.
3202: See, e.g., KSPCreate().
3204: Level: developer
3206: Concepts: matrices^Cholesky numeric factorization
3208: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3210: Developer Note: fortran interface is not autogenerated as the f90
3211: interface definition cannot be generated correctly [due to MatFactorInfo]
3213: @*/
3214: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3215: {
3223: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3224: if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3225: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3226: MatCheckPreallocated(mat,2);
3228: PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3229: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3230: PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3231: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3232: PetscObjectStateIncrease((PetscObject)fact);
3233: return(0);
3234: }
3236: /* ----------------------------------------------------------------*/
3237: /*@
3238: MatSolve - Solves A x = b, given a factored matrix.
3240: Neighbor-wise Collective on Mat and Vec
3242: Input Parameters:
3243: + mat - the factored matrix
3244: - b - the right-hand-side vector
3246: Output Parameter:
3247: . x - the result vector
3249: Notes:
3250: The vectors b and x cannot be the same. I.e., one cannot
3251: call MatSolve(A,x,x).
3254: Most users should employ the simplified KSP interface for linear solvers
3255: instead of working directly with matrix algebra routines such as this.
3256: See, e.g., KSPCreate().
3258: Level: developer
3260: Concepts: matrices^triangular solves
3262: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3263: @*/
3264: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3265: {
3275: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3276: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3277: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3278: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3279: if (!mat->rmap->N && !mat->cmap->N) return(0);
3280: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3281: MatCheckPreallocated(mat,1);
3283: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3284: if (mat->factorerrortype) {
3285: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3286: VecSetInf(x);
3287: } else {
3288: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3289: (*mat->ops->solve)(mat,b,x);
3290: }
3291: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3292: PetscObjectStateIncrease((PetscObject)x);
3293: return(0);
3294: }
3296: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X, PetscBool trans)
3297: {
3299: Vec b,x;
3300: PetscInt m,N,i;
3301: PetscScalar *bb,*xx;
3302: PetscBool flg;
3305: PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3306: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3307: PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3308: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
3310: MatDenseGetArray(B,&bb);
3311: MatDenseGetArray(X,&xx);
3312: MatGetLocalSize(B,&m,NULL); /* number local rows */
3313: MatGetSize(B,NULL,&N); /* total columns in dense matrix */
3314: MatCreateVecs(A,&x,&b);
3315: for (i=0; i<N; i++) {
3316: VecPlaceArray(b,bb + i*m);
3317: VecPlaceArray(x,xx + i*m);
3318: if (trans) {
3319: MatSolveTranspose(A,b,x);
3320: } else {
3321: MatSolve(A,b,x);
3322: }
3323: VecResetArray(x);
3324: VecResetArray(b);
3325: }
3326: VecDestroy(&b);
3327: VecDestroy(&x);
3328: MatDenseRestoreArray(B,&bb);
3329: MatDenseRestoreArray(X,&xx);
3330: return(0);
3331: }
3333: /*@
3334: MatMatSolve - Solves A X = B, given a factored matrix.
3336: Neighbor-wise Collective on Mat
3338: Input Parameters:
3339: + A - the factored matrix
3340: - B - the right-hand-side matrix (dense matrix)
3342: Output Parameter:
3343: . X - the result matrix (dense matrix)
3345: Notes:
3346:    The matrices B and X cannot be the same.  I.e., one cannot
3347:    call MatMatSolve(A,X,X).
3350:    Most users should employ the simplified KSP interface for linear solvers
3351:    instead of working directly with matrix algebra routines such as this.
3352:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3353:    at a time.
3355: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3356: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3358: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
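   Example of Usage (an illustrative sketch; F is assumed to be a factored matrix obtained from
   MatGetFactor() followed by the symbolic and numeric factorization calls, and B a MATDENSE
   right-hand-side matrix with the same row layout as F):
.vb
      MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);   /* X must be dense and have the same layout as B */
      MatMatSolve(F,B,X);
.ve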
3360: Level: developer
3362: Concepts: matrices^triangular solves
3364: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3365: @*/
3366: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3367: {
3377: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3378: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3379: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3380: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3381: if (!A->rmap->N && !A->cmap->N) return(0);
3382: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3383: MatCheckPreallocated(A,1);
3385: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3386: if (!A->ops->matsolve) {
3387: PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3388: MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3389: } else {
3390: (*A->ops->matsolve)(A,B,X);
3391: }
3392: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3393: PetscObjectStateIncrease((PetscObject)X);
3394: return(0);
3395: }
3397: /*@
3398: MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.
3400: Neighbor-wise Collective on Mat
3402: Input Parameters:
3403: + A - the factored matrix
3404: - B - the right-hand-side matrix (dense matrix)
3406: Output Parameter:
3407: . X - the result matrix (dense matrix)
3409: Notes:
3410: The matrices B and X cannot be the same. I.e., one cannot
3411: call MatMatSolveTranspose(A,X,X).
3414:    Most users should employ the simplified KSP interface for linear solvers
3415:    instead of working directly with matrix algebra routines such as this.
3416:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3417:    at a time.
3419: When using SuperLU_Dist or MUMPS as a parallel solver, PETSc will use their functionality to solve multiple right hand sides simultaneously.
3421: Level: developer
3423: Concepts: matrices^triangular solves
3425: .seealso: MatMatSolve(), MatLUFactor(), MatCholeskyFactor()
3426: @*/
3427: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3428: {
3438: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3439: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3440: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3441: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3442: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3443: if (!A->rmap->N && !A->cmap->N) return(0);
3444: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3445: MatCheckPreallocated(A,1);
3447: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3448: if (!A->ops->matsolvetranspose) {
3449: PetscInfo1(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3450: MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3451: } else {
3452: (*A->ops->matsolvetranspose)(A,B,X);
3453: }
3454: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3455: PetscObjectStateIncrease((PetscObject)X);
3456: return(0);
3457: }
3459: /*@
3460: MatMatTransposeSolve - Solves A X = B^T, given a factored matrix.
3462: Neighbor-wise Collective on Mat
3464: Input Parameters:
3465: + A - the factored matrix
3466: - Bt - the transpose of right-hand-side matrix
3468: Output Parameter:
3469: . X - the result matrix (dense matrix)
3471: Notes:
3472:    Most users should employ the simplified KSP interface for linear solvers
3473: instead of working directly with matrix algebra routines such as this.
3474: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3475: at a time.
3477:    For MUMPS, the right-hand-side matrix is only supported in centralized sparse compressed column format on the host processor. The user must create B^T in sparse compressed row format on the host processor and call MatMatTransposeSolve() to obtain the effect of MUMPS' MatMatSolve().
3479: Level: developer
3481: Concepts: matrices^triangular solves
3483: .seealso: MatMatSolve(), MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3484: @*/
3485: PetscErrorCode MatMatTransposeSolve(Mat A,Mat Bt,Mat X)
3486: {
3497: if (X == Bt) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3498: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3499: if (A->rmap->N != Bt->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat Bt: global dim %D %D",A->rmap->N,Bt->cmap->N);
3500: if (X->cmap->N < Bt->rmap->N) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as row number of the rhs matrix");
3501: if (!A->rmap->N && !A->cmap->N) return(0);
3502: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3503: MatCheckPreallocated(A,1);
3505: if (!A->ops->mattransposesolve) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
3506: PetscLogEventBegin(MAT_MatTrSolve,A,Bt,X,0);
3507: (*A->ops->mattransposesolve)(A,Bt,X);
3508: PetscLogEventEnd(MAT_MatTrSolve,A,Bt,X,0);
3509: PetscObjectStateIncrease((PetscObject)X);
3510: return(0);
3511: }
3513: /*@
3514: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3515:                             U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.
3517: Neighbor-wise Collective on Mat and Vec
3519: Input Parameters:
3520: + mat - the factored matrix
3521: - b - the right-hand-side vector
3523: Output Parameter:
3524: . x - the result vector
3526: Notes:
3527: MatSolve() should be used for most applications, as it performs
3528: a forward solve followed by a backward solve.
3530: The vectors b and x cannot be the same, i.e., one cannot
3531: call MatForwardSolve(A,x,x).
3533: For matrix in seqsbaij format with block size larger than 1,
3534: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3535: MatForwardSolve() solves U^T*D y = b, and
3536: MatBackwardSolve() solves U x = y.
3537: Thus they do not provide a symmetric preconditioner.
3539: Most users should employ the simplified KSP interface for linear solvers
3540: instead of working directly with matrix algebra routines such as this.
3541: See, e.g., KSPCreate().
3543: Level: developer
3545: Concepts: matrices^forward solves
3547: .seealso: MatSolve(), MatBackwardSolve()
3548: @*/
3549: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3550: {
3560: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3561: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3562: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3563: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3564: if (!mat->rmap->N && !mat->cmap->N) return(0);
3565: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3566: MatCheckPreallocated(mat,1);
3568: if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3569: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3570: (*mat->ops->forwardsolve)(mat,b,x);
3571: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3572: PetscObjectStateIncrease((PetscObject)x);
3573: return(0);
3574: }
3576: /*@
3577:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3578:                              D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.
3580: Neighbor-wise Collective on Mat and Vec
3582: Input Parameters:
3583: + mat - the factored matrix
3584: - b - the right-hand-side vector
3586: Output Parameter:
3587: . x - the result vector
3589: Notes:
3590: MatSolve() should be used for most applications, as it performs
3591: a forward solve followed by a backward solve.
3593: The vectors b and x cannot be the same. I.e., one cannot
3594: call MatBackwardSolve(A,x,x).
3596: For matrix in seqsbaij format with block size larger than 1,
3597: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3598: MatForwardSolve() solves U^T*D y = b, and
3599: MatBackwardSolve() solves U x = y.
3600: Thus they do not provide a symmetric preconditioner.
3602: Most users should employ the simplified KSP interface for linear solvers
3603: instead of working directly with matrix algebra routines such as this.
3604: See, e.g., KSPCreate().
3606: Level: developer
3608: Concepts: matrices^backward solves
3610: .seealso: MatSolve(), MatForwardSolve()
3611: @*/
3612: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3613: {
3623: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3624: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3625: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3626: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3627: if (!mat->rmap->N && !mat->cmap->N) return(0);
3628: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3629: MatCheckPreallocated(mat,1);
3631: if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3632: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3633: (*mat->ops->backwardsolve)(mat,b,x);
3634: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3635: PetscObjectStateIncrease((PetscObject)x);
3636: return(0);
3637: }
3639: /*@
3640: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3642: Neighbor-wise Collective on Mat and Vec
3644: Input Parameters:
3645: + mat - the factored matrix
3646: . b - the right-hand-side vector
3647: - y - the vector to be added to
3649: Output Parameter:
3650: . x - the result vector
3652: Notes:
3653: The vectors b and x cannot be the same. I.e., one cannot
3654: call MatSolveAdd(A,x,y,x).
3656: Most users should employ the simplified KSP interface for linear solvers
3657: instead of working directly with matrix algebra routines such as this.
3658: See, e.g., KSPCreate().
3660: Level: developer
3662: Concepts: matrices^triangular solves
3664: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3665: @*/
3666: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3667: {
3668: PetscScalar one = 1.0;
3669: Vec tmp;
3681: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3682: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3683: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3684: if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3685: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3686: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3687: if (!mat->rmap->N && !mat->cmap->N) return(0);
3688: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3689: MatCheckPreallocated(mat,1);
3691: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3692: if (mat->ops->solveadd) {
3693: (*mat->ops->solveadd)(mat,b,y,x);
3694: } else {
3695: /* do the solve then the add manually */
3696: if (x != y) {
3697: MatSolve(mat,b,x);
3698: VecAXPY(x,one,y);
3699: } else {
3700: VecDuplicate(x,&tmp);
3701: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3702: VecCopy(x,tmp);
3703: MatSolve(mat,b,x);
3704: VecAXPY(x,one,tmp);
3705: VecDestroy(&tmp);
3706: }
3707: }
3708: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3709: PetscObjectStateIncrease((PetscObject)x);
3710: return(0);
3711: }
3713: /*@
3714: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3716: Neighbor-wise Collective on Mat and Vec
3718: Input Parameters:
3719: + mat - the factored matrix
3720: - b - the right-hand-side vector
3722: Output Parameter:
3723: . x - the result vector
3725: Notes:
3726: The vectors b and x cannot be the same. I.e., one cannot
3727: call MatSolveTranspose(A,x,x).
3729: Most users should employ the simplified KSP interface for linear solvers
3730: instead of working directly with matrix algebra routines such as this.
3731: See, e.g., KSPCreate().
3733: Level: developer
3735: Concepts: matrices^triangular solves
3737: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3738: @*/
3739: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3740: {
3750: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3751: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3752: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3753: if (!mat->rmap->N && !mat->cmap->N) return(0);
3754: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3755: MatCheckPreallocated(mat,1);
3756: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3757: if (mat->factorerrortype) {
3758: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3759: VecSetInf(x);
3760: } else {
3761: if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3762: (*mat->ops->solvetranspose)(mat,b,x);
3763: }
3764: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3765: PetscObjectStateIncrease((PetscObject)x);
3766: return(0);
3767: }
3769: /*@
3770: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3771: factored matrix.
3773: Neighbor-wise Collective on Mat and Vec
3775: Input Parameters:
3776: + mat - the factored matrix
3777: . b - the right-hand-side vector
3778: - y - the vector to be added to
3780: Output Parameter:
3781: . x - the result vector
3783: Notes:
3784: The vectors b and x cannot be the same. I.e., one cannot
3785: call MatSolveTransposeAdd(A,x,y,x).
3787: Most users should employ the simplified KSP interface for linear solvers
3788: instead of working directly with matrix algebra routines such as this.
3789: See, e.g., KSPCreate().
3791: Level: developer
3793: Concepts: matrices^triangular solves
3795: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3796: @*/
3797: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3798: {
3799: PetscScalar one = 1.0;
3801: Vec tmp;
3812: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3813: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3814: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3815: if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3816: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3817: if (!mat->rmap->N && !mat->cmap->N) return(0);
3818: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3819: MatCheckPreallocated(mat,1);
3821: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3822: if (mat->ops->solvetransposeadd) {
3823: if (mat->factorerrortype) {
3824: PetscInfo1(mat,"MatFactorError %D\n",mat->factorerrortype);
3825: VecSetInf(x);
3826: } else {
3827: (*mat->ops->solvetransposeadd)(mat,b,y,x);
3828: }
3829: } else {
3830: /* do the solve then the add manually */
3831: if (x != y) {
3832: MatSolveTranspose(mat,b,x);
3833: VecAXPY(x,one,y);
3834: } else {
3835: VecDuplicate(x,&tmp);
3836: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3837: VecCopy(x,tmp);
3838: MatSolveTranspose(mat,b,x);
3839: VecAXPY(x,one,tmp);
3840: VecDestroy(&tmp);
3841: }
3842: }
3843: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3844: PetscObjectStateIncrease((PetscObject)x);
3845: return(0);
3846: }
3847: /* ----------------------------------------------------------------*/
3849: /*@
3850: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3852: Neighbor-wise Collective on Mat and Vec
3854: Input Parameters:
3855: + mat - the matrix
3856: . b - the right hand side
3857: . omega - the relaxation factor
3858: . flag - flag indicating the type of SOR (see below)
3859: . shift - diagonal shift
3860: . its - the number of iterations
3861: - lits - the number of local iterations
3863: Output Parameters:
3864: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3866: SOR Flags:
3867: . SOR_FORWARD_SWEEP - forward SOR
3868: . SOR_BACKWARD_SWEEP - backward SOR
3869: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3870: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3871: .     SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3872: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3873: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3874: upper/lower triangular part of matrix to
3875: vector (with omega)
3876: . SOR_ZERO_INITIAL_GUESS - zero initial guess
3878: Notes:
3879: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3880: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3881: on each processor.
3883: Application programmers will not generally use MatSOR() directly,
3884: but instead will employ the KSP/PC interface.
3887:    For BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing, otherwise it does a pointwise smoothing.
3889: Notes for Advanced Users:
3890: The flags are implemented as bitwise inclusive or operations.
3891: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3892: to specify a zero initial guess for SSOR.
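   For example, a single symmetric sweep with omega = 1.0, no diagonal shift, and a zero initial
   guess could be requested as follows (an illustrative sketch; A, b, and x are assumed to be an
   assembled matrix and two compatible vectors):
.vb
      MatSOR(A,b,1.0,SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP,0.0,1,1,x);
.ve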
3894: Most users should employ the simplified KSP interface for linear solvers
3895: instead of working directly with matrix algebra routines such as this.
3896: See, e.g., KSPCreate().
3898: Vectors x and b CANNOT be the same
3900:    Developer Note: We should add block SOR support for AIJ matrices with block size greater than one and no inodes
3902: Level: developer
3904: Concepts: matrices^relaxation
3905: Concepts: matrices^SOR
3906: Concepts: matrices^Gauss-Seidel
3908: @*/
3909: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3910: {
3920: if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3921: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3922: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3923: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3924: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3925: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3926: if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3927: if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3928: if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");
3930: MatCheckPreallocated(mat,1);
3931: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3932:   (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3933: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3934: PetscObjectStateIncrease((PetscObject)x);
3935: return(0);
3936: }
3938: /*
3939: Default matrix copy routine.
3940: */
3941: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3942: {
3943: PetscErrorCode ierr;
3944: PetscInt i,rstart = 0,rend = 0,nz;
3945: const PetscInt *cwork;
3946: const PetscScalar *vwork;
3949: if (B->assembled) {
3950: MatZeroEntries(B);
3951: }
3952: MatGetOwnershipRange(A,&rstart,&rend);
3953: for (i=rstart; i<rend; i++) {
3954: MatGetRow(A,i,&nz,&cwork,&vwork);
3955: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3956: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3957: }
3958: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3959: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3960: return(0);
3961: }
3963: /*@
3964:    MatCopy - Copies a matrix to another matrix.
3966: Collective on Mat
3968: Input Parameters:
3969: + A - the matrix
3970: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3972: Output Parameter:
3973: . B - where the copy is put
3975: Notes:
3976: If you use SAME_NONZERO_PATTERN then the two matrices had better have the
3977: same nonzero pattern or the routine will crash.
3979: MatCopy() copies the matrix entries of a matrix to another existing
3980: matrix (after first zeroing the second matrix). A related routine is
3981: MatConvert(), which first creates a new matrix and then copies the data.
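   Example of Usage (an illustrative sketch; A is assumed to be an assembled matrix and B is
   created here with the same nonzero pattern so that SAME_NONZERO_PATTERN is safe):
.vb
      MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);
      MatCopy(A,B,SAME_NONZERO_PATTERN);
.ve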
3983: Level: intermediate
3985: Concepts: matrices^copying
3987: .seealso: MatConvert(), MatDuplicate()
3989: @*/
3990: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3991: {
3993: PetscInt i;
4001: MatCheckPreallocated(B,2);
4002: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4003: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4004: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4005: MatCheckPreallocated(A,1);
4006: if (A == B) return(0);
4008: PetscLogEventBegin(MAT_Copy,A,B,0,0);
4009: if (A->ops->copy) {
4010: (*A->ops->copy)(A,B,str);
4011: } else { /* generic conversion */
4012: MatCopy_Basic(A,B,str);
4013: }
4015: B->stencil.dim = A->stencil.dim;
4016: B->stencil.noc = A->stencil.noc;
4017: for (i=0; i<=A->stencil.dim; i++) {
4018: B->stencil.dims[i] = A->stencil.dims[i];
4019: B->stencil.starts[i] = A->stencil.starts[i];
4020: }
4022: PetscLogEventEnd(MAT_Copy,A,B,0,0);
4023: PetscObjectStateIncrease((PetscObject)B);
4024: return(0);
4025: }
4027: /*@C
4028: MatConvert - Converts a matrix to another matrix, either of the same
4029: or different type.
4031: Collective on Mat
4033: Input Parameters:
4034: + mat - the matrix
4035: . newtype - new matrix type. Use MATSAME to create a new matrix of the
4036: same type as the original matrix.
4037: - reuse - denotes if the destination matrix is to be created or reused.
4038: Use MAT_INPLACE_MATRIX for inplace conversion (that is when you want the input mat to be changed to contain the matrix in the new format), otherwise use
4039: MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (can only be used after the first call was made with MAT_INITIAL_MATRIX, causes the matrix space in M to be reused).
4041: Output Parameter:
4042: . M - pointer to place new matrix
4044: Notes:
4045: MatConvert() first creates a new matrix and then copies the data from
4046: the first matrix. A related routine is MatCopy(), which copies the matrix
4047: entries of one matrix to another already existing matrix context.
4049:    Cannot be used to convert a sequential matrix to a parallel one or vice versa;
4050:    the MPI communicator of the generated matrix is always the same as the communicator
4051:    of the input matrix.
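   Example of Usage (an illustrative sketch; A is assumed to be an assembled AIJ matrix):
.vb
      MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&B);   /* first call creates the dense copy B */
      MatConvert(A,MATDENSE,MAT_REUSE_MATRIX,&B);     /* later calls may reuse B after the values of A change */
.ve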
4053: Level: intermediate
4055: Concepts: matrices^converting between storage formats
4057: .seealso: MatCopy(), MatDuplicate()
4058: @*/
4059: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
4060: {
4062: PetscBool sametype,issame,flg;
4063: char convname[256],mtype[256];
4064: Mat B;
4070: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4071: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4072: MatCheckPreallocated(mat,1);
4074: PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
4075: if (flg) {
4076: newtype = mtype;
4077: }
4078: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
4079: PetscStrcmp(newtype,"same",&issame);
4080: if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");
4081: if ((reuse == MAT_REUSE_MATRIX) && (mat == *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX means reuse matrix in final argument, perhaps you mean MAT_INPLACE_MATRIX");
4083: if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);
4085: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
4086: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4087: } else {
4088: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
4089: const char *prefix[3] = {"seq","mpi",""};
4090: PetscInt i;
4091: /*
4092: Order of precedence:
4093: 1) See if a specialized converter is known to the current matrix.
4094: 2) See if a specialized converter is known to the desired matrix class.
4095: 3) See if a good general converter is registered for the desired class
4096: (as of 6/27/03 only MATMPIADJ falls into this category).
4097: 4) See if a good general converter is known for the current matrix.
4098: 5) Use a really basic converter.
4099: */
4101: /* 1) See if a specialized converter is known to the current matrix and the desired class */
4102: for (i=0; i<3; i++) {
4103: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4104: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4105: PetscStrlcat(convname,"_",sizeof(convname));
4106: PetscStrlcat(convname,prefix[i],sizeof(convname));
4107: PetscStrlcat(convname,issame ? ((PetscObject)mat)->type_name : newtype,sizeof(convname));
4108: PetscStrlcat(convname,"_C",sizeof(convname));
4109: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4110: if (conv) goto foundconv;
4111: }
4113: /* 2) See if a specialized converter is known to the desired matrix class. */
4114: MatCreate(PetscObjectComm((PetscObject)mat),&B);
4115: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4116: MatSetType(B,newtype);
4117: for (i=0; i<3; i++) {
4118: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4119: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4120: PetscStrlcat(convname,"_",sizeof(convname));
4121: PetscStrlcat(convname,prefix[i],sizeof(convname));
4122: PetscStrlcat(convname,newtype,sizeof(convname));
4123: PetscStrlcat(convname,"_C",sizeof(convname));
4124: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4125: if (conv) {
4126: MatDestroy(&B);
4127: goto foundconv;
4128: }
4129: }
4131: /* 3) See if a good general converter is registered for the desired class */
4132: conv = B->ops->convertfrom;
4133: MatDestroy(&B);
4134: if (conv) goto foundconv;
4136: /* 4) See if a good general converter is known for the current matrix */
4137: if (mat->ops->convert) {
4138: conv = mat->ops->convert;
4139: }
4140: if (conv) goto foundconv;
4142: /* 5) Use a really basic converter. */
4143: conv = MatConvert_Basic;
4145: foundconv:
4146: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4147: (*conv)(mat,newtype,reuse,M);
4148: if (mat->rmap->mapping && mat->cmap->mapping && !(*M)->rmap->mapping && !(*M)->cmap->mapping) {
4149: /* the block sizes must be same if the mappings are copied over */
4150: (*M)->rmap->bs = mat->rmap->bs;
4151: (*M)->cmap->bs = mat->cmap->bs;
4152: PetscObjectReference((PetscObject)mat->rmap->mapping);
4153: PetscObjectReference((PetscObject)mat->cmap->mapping);
4154: (*M)->rmap->mapping = mat->rmap->mapping;
4155: (*M)->cmap->mapping = mat->cmap->mapping;
4156: }
4157: (*M)->stencil.dim = mat->stencil.dim;
4158: (*M)->stencil.noc = mat->stencil.noc;
4159: for (i=0; i<=mat->stencil.dim; i++) {
4160: (*M)->stencil.dims[i] = mat->stencil.dims[i];
4161: (*M)->stencil.starts[i] = mat->stencil.starts[i];
4162: }
4163: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4164: }
4165: PetscObjectStateIncrease((PetscObject)*M);
4167: /* Copy Mat options */
4168: if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
4169: if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
4170: return(0);
4171: }
4173: /*@C
4174:    MatFactorGetSolverType - Returns the name of the package providing the factorization routines
4176: Not Collective
4178: Input Parameter:
4179: . mat - the matrix, must be a factored matrix
4181: Output Parameter:
4182: . type - the string name of the package (do not free this string)
4184: Notes:
4185:       In Fortran you pass in an empty string and the package name will be copied into it.
4186: (Make sure the string is long enough)
4188: Level: intermediate
4190: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4191: @*/
4192: PetscErrorCode MatFactorGetSolverType(Mat mat, MatSolverType *type)
4193: {
4194: PetscErrorCode ierr, (*conv)(Mat,MatSolverType*);
4199: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4200: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverType_C",&conv);
4201: if (!conv) {
4202: *type = MATSOLVERPETSC;
4203: } else {
4204: (*conv)(mat,type);
4205: }
4206: return(0);
4207: }
4209: typedef struct _MatSolverTypeForSpecifcType* MatSolverTypeForSpecifcType;
4210: struct _MatSolverTypeForSpecifcType {
4211: MatType mtype;
4212: PetscErrorCode (*getfactor[4])(Mat,MatFactorType,Mat*);
4213: MatSolverTypeForSpecifcType next;
4214: };
4216: typedef struct _MatSolverTypeHolder* MatSolverTypeHolder;
4217: struct _MatSolverTypeHolder {
4218: char *name;
4219: MatSolverTypeForSpecifcType handlers;
4220: MatSolverTypeHolder next;
4221: };
4223: static MatSolverTypeHolder MatSolverTypeHolders = NULL;
4225: /*@C
4226:    MatSolverTypeRegister - Registers a MatSolverType that works for a particular matrix type
4228: Input Parameters:
4229: + package - name of the package, for example petsc or superlu
4230: . mtype - the matrix type that works with this package
4231: . ftype - the type of factorization supported by the package
4232: - getfactor - routine that will create the factored matrix ready to be used
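   For example, a hypothetical factorization routine MyGetFactor_SeqAIJ() (the routine and package
   names below are illustrative, not existing PETSc symbols) could be registered for LU
   factorizations of MATSEQAIJ matrices with
.vb
      MatSolverTypeRegister("mysolver",MATSEQAIJ,MAT_FACTOR_LU,MyGetFactor_SeqAIJ);
.ve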
4234: Level: intermediate
4236: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4237: @*/
4238: PetscErrorCode MatSolverTypeRegister(MatSolverType package,MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4239: {
4240: PetscErrorCode ierr;
4241: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4242: PetscBool flg;
4243: MatSolverTypeForSpecifcType inext,iprev = NULL;
4246: MatInitializePackage();
4247: if (!next) {
4248: PetscNew(&MatSolverTypeHolders);
4249: PetscStrallocpy(package,&MatSolverTypeHolders->name);
4250: PetscNew(&MatSolverTypeHolders->handlers);
4251: PetscStrallocpy(mtype,(char **)&MatSolverTypeHolders->handlers->mtype);
4252: MatSolverTypeHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4253: return(0);
4254: }
4255: while (next) {
4256: PetscStrcasecmp(package,next->name,&flg);
4257: if (flg) {
4258: if (!next->handlers) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MatSolverTypeHolder is missing handlers");
4259: inext = next->handlers;
4260: while (inext) {
4261: PetscStrcasecmp(mtype,inext->mtype,&flg);
4262: if (flg) {
4263: inext->getfactor[(int)ftype-1] = getfactor;
4264: return(0);
4265: }
4266: iprev = inext;
4267: inext = inext->next;
4268: }
4269: PetscNew(&iprev->next);
4270: PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4271: iprev->next->getfactor[(int)ftype-1] = getfactor;
4272: return(0);
4273: }
4274: prev = next;
4275: next = next->next;
4276: }
4277: PetscNew(&prev->next);
4278: PetscStrallocpy(package,&prev->next->name);
4279: PetscNew(&prev->next->handlers);
4280: PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4281: prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4282: return(0);
4283: }
4285: /*@C
4286:    MatSolverTypeGet - Gets the function that creates the factor matrix if it exists
4288: Input Parameters:
4289: + package - name of the package, for example petsc or superlu
4290: . ftype - the type of factorization supported by the package
4291: - mtype - the matrix type that works with this package
4293: Output Parameters:
4294: + foundpackage - PETSC_TRUE if the package was registered
4295: . foundmtype - PETSC_TRUE if the package supports the requested mtype
4296: - getfactor - routine that will create the factored matrix ready to be used or NULL if not found
4298: Level: intermediate
4300: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4301: @*/
4302: PetscErrorCode MatSolverTypeGet(MatSolverType package,MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4303: {
4304: PetscErrorCode ierr;
4305: MatSolverTypeHolder next = MatSolverTypeHolders;
4306: PetscBool flg;
4307: MatSolverTypeForSpecifcType inext;
4310: if (foundpackage) *foundpackage = PETSC_FALSE;
4311: if (foundmtype) *foundmtype = PETSC_FALSE;
4312: if (getfactor) *getfactor = NULL;
4314: if (package) {
4315: while (next) {
4316: PetscStrcasecmp(package,next->name,&flg);
4317: if (flg) {
4318: if (foundpackage) *foundpackage = PETSC_TRUE;
4319: inext = next->handlers;
4320: while (inext) {
4321: PetscStrbeginswith(mtype,inext->mtype,&flg);
4322: if (flg) {
4323: if (foundmtype) *foundmtype = PETSC_TRUE;
4324: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4325: return(0);
4326: }
4327: inext = inext->next;
4328: }
4329: }
4330: next = next->next;
4331: }
4332: } else {
4333: while (next) {
4334: inext = next->handlers;
4335: while (inext) {
4336: PetscStrbeginswith(mtype,inext->mtype,&flg);
4337: if (flg && inext->getfactor[(int)ftype-1]) {
4338: if (foundpackage) *foundpackage = PETSC_TRUE;
4339: if (foundmtype) *foundmtype = PETSC_TRUE;
4340: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4341: return(0);
4342: }
4343: inext = inext->next;
4344: }
4345: next = next->next;
4346: }
4347: }
4348: return(0);
4349: }
4351: PetscErrorCode MatSolverTypeDestroy(void)
4352: {
4353: PetscErrorCode ierr;
4354: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4355: MatSolverTypeForSpecifcType inext,iprev;
4358: while (next) {
4359: PetscFree(next->name);
4360: inext = next->handlers;
4361: while (inext) {
4362: PetscFree(inext->mtype);
4363: iprev = inext;
4364: inext = inext->next;
4365: PetscFree(iprev);
4366: }
4367: prev = next;
4368: next = next->next;
4369: PetscFree(prev);
4370: }
4371: MatSolverTypeHolders = NULL;
4372: return(0);
4373: }
4375: /*@C
4376:    MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()
4378: Collective on Mat
4380: Input Parameters:
4381: + mat - the matrix
4382: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4383: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU.
4385: Output Parameters:
4386: . f - the factor matrix used with MatXXFactorSymbolic() calls
4388: Notes:
4389: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4390: such as pastix, superlu, mumps etc.
4392:      PETSc must have been configured (./configure) with the option --download-<package> to use the corresponding external solver.
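   Example of Usage (an illustrative sketch; A is assumed to be a symmetric, assembled AIJ matrix,
   and perm and info an IS and a MatFactorInfo supplied by the caller):
.vb
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
      MatCholeskyFactorSymbolic(F,A,perm,&info);
      MatCholeskyFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
.ve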
4394: Level: intermediate
4396: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4397: @*/
4398: PetscErrorCode MatGetFactor(Mat mat, MatSolverType type,MatFactorType ftype,Mat *f)
4399: {
4400: PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4401: PetscBool foundpackage,foundmtype;
4407: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4408: MatCheckPreallocated(mat,1);
4410: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4411: if (!foundpackage) {
4412: if (type) {
4413: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4414: } else {
4415: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4416: }
4417: }
4419: if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4420: if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverType %s does not support factorization type %s for matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);
4422: #if defined(PETSC_USE_COMPLEX)
4423: if (mat->hermitian && !mat->symmetric && (ftype == MAT_FACTOR_CHOLESKY||ftype == MAT_FACTOR_ICC)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Hermitian CHOLESKY or ICC Factor is not supported");
4424: #endif
4426: (*conv)(mat,ftype,f);
4427: return(0);
4428: }
4430: /*@C
4431:    MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular package and factor type
4433: Not Collective
4435: Input Parameters:
4436: + mat - the matrix
4437: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4438: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU.
4440: Output Parameter:
4441: . flg - PETSC_TRUE if the factorization is available
4443: Notes:
4444: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4445: such as pastix, superlu, mumps etc.
4447:      PETSc must have been configured (./configure) with the option --download-<package> to use the corresponding external solver.
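   Example of Usage (an illustrative sketch; whether flg comes back PETSC_TRUE depends on the
   solvers PETSc was configured with):
.vb
      MatGetFactorAvailable(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&flg);
      if (flg) {
        MatGetFactor(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&F);
      }
.ve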
4449: Level: intermediate
4451: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4452: @*/
4453: PetscErrorCode MatGetFactorAvailable(Mat mat, MatSolverType type,MatFactorType ftype,PetscBool *flg)
4454: {
4455: PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);
4461: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4462: MatCheckPreallocated(mat,1);
4464: *flg = PETSC_FALSE;
4465: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4466: if (gconv) {
4467: *flg = PETSC_TRUE;
4468: }
4469: return(0);
4470: }
4472: #include <petscdmtypes.h>
4474: /*@
4475: MatDuplicate - Duplicates a matrix including the non-zero structure.
4477: Collective on Mat
4479: Input Parameters:
4480: + mat - the matrix
4481: - op - One of MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES, or MAT_SHARE_NONZERO_PATTERN.
4482: See the manual page for MatDuplicateOption for an explanation of these options.
4484: Output Parameter:
4485: . M - pointer to place new matrix
4487: Level: intermediate
4489: Concepts: matrices^duplicating
4491: Notes:
4492: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
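   Example of Usage (an illustrative sketch; A is assumed to be an assembled matrix):
.vb
      MatDuplicate(A,MAT_COPY_VALUES,&B);          /* B has the same nonzero pattern and values as A */
      MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&C);   /* C has the same nonzero pattern, values set to zero */
.ve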
4494: .seealso: MatCopy(), MatConvert(), MatDuplicateOption
4495: @*/
4496: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4497: {
4499: Mat B;
4500: PetscInt i;
4501: DM dm;
4502: void (*viewf)(void);
4508: if (op == MAT_COPY_VALUES && !mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"MAT_COPY_VALUES not allowed for unassembled matrix");
4509: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4510: MatCheckPreallocated(mat,1);
4512: *M = 0;
4513: if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4514: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4515: (*mat->ops->duplicate)(mat,op,M);
4516: B = *M;
4518: MatGetOperation(mat,MATOP_VIEW,&viewf);
4519: if (viewf) {
4520: MatSetOperation(B,MATOP_VIEW,viewf);
4521: }
4523: B->stencil.dim = mat->stencil.dim;
4524: B->stencil.noc = mat->stencil.noc;
4525: for (i=0; i<=mat->stencil.dim; i++) {
4526: B->stencil.dims[i] = mat->stencil.dims[i];
4527: B->stencil.starts[i] = mat->stencil.starts[i];
4528: }
4530: B->nooffproczerorows = mat->nooffproczerorows;
4531: B->nooffprocentries = mat->nooffprocentries;
4533: PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4534: if (dm) {
4535: PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4536: }
4537: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4538: PetscObjectStateIncrease((PetscObject)B);
4539: return(0);
4540: }
4542: /*@
4543: MatGetDiagonal - Gets the diagonal of a matrix.
4545: Logically Collective on Mat and Vec
4547: Input Parameters:
4548: + mat - the matrix
4549: - v - the vector for storing the diagonal
4551: Output Parameter:
4552: . v - the diagonal of the matrix
4554: Level: intermediate
4556: Note:
4557: Currently only correct in parallel for square matrices.
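   Example of Usage (an illustrative sketch; A is assumed to be an assembled square matrix):
.vb
      MatCreateVecs(A,NULL,&d);   /* d conforms to the row layout of A */
      MatGetDiagonal(A,d);
.ve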
4559: Concepts: matrices^accessing diagonals
4561: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs()
4562: @*/
4563: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4564: {
4571: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4572: if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4573: MatCheckPreallocated(mat,1);
4575: (*mat->ops->getdiagonal)(mat,v);
4576: PetscObjectStateIncrease((PetscObject)v);
4577: return(0);
4578: }
4580: /*@C
4581: MatGetRowMin - Gets the minimum value (of the real part) of each
4582: row of the matrix
4584: Logically Collective on Mat and Vec
4586: Input Parameters:
4587: . mat - the matrix
4589: Output Parameter:
4590: +  v - the vector for storing the minimums
4591: - idx - the indices of the column found for each row (optional)
4593: Level: intermediate
4595: Notes:
4596:     The results of this call are the same as if one converted the matrix to dense format
4597:       and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).
4599: This code is only implemented for a couple of matrix formats.
4601:     Concepts: matrices^getting row minimums
4603: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(),
4604: MatGetRowMax()
4605: @*/
4606: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4607: {
4614: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4615:   if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4616: MatCheckPreallocated(mat,1);
4618: (*mat->ops->getrowmin)(mat,v,idx);
4619: PetscObjectStateIncrease((PetscObject)v);
4620: return(0);
4621: }
4623: /*@C
4624: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4625: row of the matrix
4627: Logically Collective on Mat and Vec
4629: Input Parameters:
4630: . mat - the matrix
4632: Output Parameter:
4633: + v - the vector for storing the minimums
4634: - idx - the indices of the column found for each row (or NULL if not needed)
4636: Level: intermediate
4638: Notes:
4639:     If a row is completely empty or has only 0.0 values then the idx[] value for that
4640: row is 0 (the first column).
4642: This code is only implemented for a couple of matrix formats.
4644:     Concepts: matrices^getting row minimums
4646: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4647: @*/
4648: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4649: {
4656: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4657: if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4658: MatCheckPreallocated(mat,1);
4659: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4661: (*mat->ops->getrowminabs)(mat,v,idx);
4662: PetscObjectStateIncrease((PetscObject)v);
4663: return(0);
4664: }
4666: /*@C
4667: MatGetRowMax - Gets the maximum value (of the real part) of each
4668: row of the matrix
4670: Logically Collective on Mat and Vec
4672: Input Parameters:
4673: . mat - the matrix
4675: Output Parameter:
4676: + v - the vector for storing the maximums
4677: - idx - the indices of the column found for each row (optional)
4679: Level: intermediate
4681: Notes:
4682:     The results of this call are the same as if one converted the matrix to dense format
4683:       and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).
4685: This code is only implemented for a couple of matrix formats.
4687: Concepts: matrices^getting row maximums
4689: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4690: @*/
4691: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4692: {
4699: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4700: if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4701: MatCheckPreallocated(mat,1);
4703: (*mat->ops->getrowmax)(mat,v,idx);
4704: PetscObjectStateIncrease((PetscObject)v);
4705: return(0);
4706: }
4708: /*@C
4709: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4710: row of the matrix
4712: Logically Collective on Mat and Vec
4714: Input Parameters:
4715: . mat - the matrix
4717: Output Parameter:
4718: + v - the vector for storing the maximums
4719: - idx - the indices of the column found for each row (or NULL if not needed)
4721: Level: intermediate
4723: Notes:
4724:     If a row is completely empty or has only 0.0 values then the idx[] value for that
4725: row is 0 (the first column).
4727: This code is only implemented for a couple of matrix formats.
4729: Concepts: matrices^getting row maximums
4731: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4732: @*/
4733: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4734: {
4741: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4742: if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4743: MatCheckPreallocated(mat,1);
4744: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4746: (*mat->ops->getrowmaxabs)(mat,v,idx);
4747: PetscObjectStateIncrease((PetscObject)v);
4748: return(0);
4749: }
4751: /*@
4752: MatGetRowSum - Gets the sum of each row of the matrix
4754: Logically or Neighborhood Collective on Mat and Vec
4756: Input Parameters:
4757: . mat - the matrix
4759: Output Parameter:
4760: . v - the vector for storing the sum of rows
4762: Level: intermediate
4764: Notes:
4765: This code is slow since it is not currently specialized for different formats
4767: Concepts: matrices^getting row sums
4769: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4770: @*/
4771: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4772: {
4773: Vec ones;
4780: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4781: MatCheckPreallocated(mat,1);
4782: MatCreateVecs(mat,&ones,NULL);
4783: VecSet(ones,1.);
4784: MatMult(mat,ones,v);
4785: VecDestroy(&ones);
4786: return(0);
4787: }
4789: /*@
4790: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4792: Collective on Mat
4794: Input Parameter:
4795: + mat - the matrix to transpose
4796: - reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX
4798: Output Parameters:
4799: . B - the transpose
4801: Notes:
4802: If you use MAT_INPLACE_MATRIX then you must pass in &mat for B
4804: MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used
4806: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
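   Example of Usage (an illustrative sketch; A is assumed to be an assembled matrix):
.vb
      MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* first call creates At = A^T */
      MatTranspose(A,MAT_REUSE_MATRIX,&At);     /* later calls refill At after the values of A change */
.ve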
4808: Level: intermediate
4810: Concepts: matrices^transposing
4812: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4813: @*/
4814: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4815: {
4821: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4822: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4823: if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4824: if (reuse == MAT_INPLACE_MATRIX && mat != *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires last matrix to match first");
4825: if (reuse == MAT_REUSE_MATRIX && mat == *B) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Perhaps you mean MAT_INPLACE_MATRIX");
4826: MatCheckPreallocated(mat,1);
4828: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4829: (*mat->ops->transpose)(mat,reuse,B);
4830: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4831: if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4832: return(0);
4833: }
4835: /*@
4836:    MatIsTranspose - Tests whether a matrix is another one's transpose,
4837: or its own, in which case it tests symmetry.
4839: Collective on Mat
4841: Input Parameter:
4842: + A - the matrix to test
4843: - B - the matrix to test against, this can equal the first parameter
4845: Output Parameters:
4846: . flg - the result
4848: Notes:
4849: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4850: has a running time of the order of the number of nonzeros; the parallel
4851: test involves parallel copies of the block-offdiagonal parts of the matrix.
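   Example of Usage (an illustrative sketch; A is assumed to be an assembled AIJ matrix, and
   passing A for both matrix arguments tests whether A is symmetric to within the given tolerance):
.vb
      MatIsTranspose(A,A,0.0,&flg);
.ve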
4853: Level: intermediate
4855: Concepts: matrices^transposing, matrix^symmetry
4857: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4858: @*/
4859: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4860: {
4861: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4867: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4868: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4869: *flg = PETSC_FALSE;
4870: if (f && g) {
4871: if (f == g) {
4872: (*f)(A,B,tol,flg);
4873: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4874: } else {
4875: MatType mattype;
4876: if (!f) {
4877: MatGetType(A,&mattype);
4878: } else {
4879: MatGetType(B,&mattype);
4880: }
4881: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4882: }
4883: return(0);
4884: }
4886: /*@
4887:    MatHermitianTranspose - Computes an in-place or out-of-place Hermitian (conjugate) transpose of a matrix.
4889: Collective on Mat
4891: Input Parameter:
4892: + mat - the matrix to transpose and complex conjugate
4893: - reuse - MAT_INITIAL_MATRIX to create a new matrix, MAT_INPLACE_MATRIX to reuse the first argument to store the transpose
4895: Output Parameters:
4896: .  B - the Hermitian transpose
4898: Level: intermediate
4900:    Concepts: matrices^transposing, complex conjugate
4902: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4903: @*/
4904: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4905: {
4909: MatTranspose(mat,reuse,B);
4910: #if defined(PETSC_USE_COMPLEX)
4911: MatConjugate(*B);
4912: #endif
4913: return(0);
4914: }
4916: /*@
4917:    MatIsHermitianTranspose - Tests whether a matrix is another one's Hermitian transpose.
4919: Collective on Mat
4921: Input Parameter:
4922: + A - the matrix to test
4923: - B - the matrix to test against, this can equal the first parameter
4925: Output Parameters:
4926: . flg - the result
4928: Notes:
4929: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4930: has a running time of the order of the number of nonzeros; the parallel
4931: test involves parallel copies of the block-offdiagonal parts of the matrix.
4933: Level: intermediate
4935: Concepts: matrices^transposing, matrix^symmetry
4937: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4938: @*/
4939: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4940: {
4941: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4947: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4948: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4949: if (f && g) {
4950: if (f==g) {
4951: (*f)(A,B,tol,flg);
4952: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4953: }
4954: return(0);
4955: }
4957: /*@
4958: MatPermute - Creates a new matrix with rows and columns permuted from the
4959: original.
4961: Collective on Mat
4963: Input Parameters:
4964: + mat - the matrix to permute
4965: . row - row permutation, each processor supplies only the permutation for its rows
4966: - col - column permutation, each processor supplies only the permutation for its columns
4968: Output Parameters:
4969: . B - the permuted matrix
4971: Level: advanced
4973: Note:
4974: The index sets map from row/col of permuted matrix to row/col of original matrix.
4975: The index sets should be on the same communicator as Mat and have the same local sizes.
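   Example of Usage:
   A minimal sketch that permutes mat with a reverse Cuthill-McKee ordering (error checking omitted):
.vb
   IS  rowperm,colperm;
   Mat B;
   MatGetOrdering(mat,MATORDERINGRCM,&rowperm,&colperm);
   MatPermute(mat,rowperm,colperm,&B);
   ISDestroy(&rowperm);
   ISDestroy(&colperm);
.ve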
4977: Concepts: matrices^permuting
4979: .seealso: MatGetOrdering(), ISAllGather()
4981: @*/
4982: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4983: {
4992: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4993: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4994: if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4995: MatCheckPreallocated(mat,1);
4997: (*mat->ops->permute)(mat,row,col,B);
4998: PetscObjectStateIncrease((PetscObject)*B);
4999: return(0);
5000: }
5002: /*@
5003: MatEqual - Compares two matrices.
5005: Collective on Mat
5007: Input Parameters:
5008: + A - the first matrix
5009: - B - the second matrix
5011: Output Parameter:
5012: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
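   Example of Usage:
   A minimal sketch (assumes A and B are assembled matrices of the same type):
.vb
   PetscBool flg;
   MatEqual(A,B,&flg);
   if (flg) PetscPrintf(PETSC_COMM_WORLD,"Matrices are equal\n");
.ve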
5014: Level: intermediate
5016: Concepts: matrices^equality between
5017: @*/
5018: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
5019: {
5029: MatCheckPreallocated(B,2);
5030: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5031: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5032: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
5033: if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
5034: if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
5035: if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
5036: MatCheckPreallocated(A,1);
5038: (*A->ops->equal)(A,B,flg);
5039: return(0);
5040: }
5042: /*@
5043: MatDiagonalScale - Scales a matrix on the left and right by diagonal
5044: matrices that are stored as vectors. Either of the two scaling
5045: matrices can be NULL.
5047: Collective on Mat
5049: Input Parameters:
5050: + mat - the matrix to be scaled
5051: . l - the left scaling vector (or NULL)
5052: - r - the right scaling vector (or NULL)
5054: Notes:
5055: MatDiagonalScale() computes A = LAR, where
5056: L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
5057: L scales the rows of the matrix and R scales the columns of the matrix.
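   Example of Usage:
   A minimal sketch that scales each row of mat by the reciprocal of its diagonal entry, a simple Jacobi-style row scaling (assumes mat is an assembled square matrix with a nonzero diagonal; error checking omitted):
.vb
   Vec d;
   MatCreateVecs(mat,NULL,&d);
   MatGetDiagonal(mat,d);
   VecReciprocal(d);
   MatDiagonalScale(mat,d,NULL);
   VecDestroy(&d);
.ve
   Passing NULL for the right scaling vector leaves the columns untouched.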
5059: Level: intermediate
5061: Concepts: matrices^diagonal scaling
5062: Concepts: diagonal scaling of matrices
5064: .seealso: MatScale(), MatShift(), MatDiagonalSet()
5065: @*/
5066: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
5067: {
5073: if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5076: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5077: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5078: MatCheckPreallocated(mat,1);
5080: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5081: (*mat->ops->diagonalscale)(mat,l,r);
5082: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5083: PetscObjectStateIncrease((PetscObject)mat);
5084: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5085: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5086: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5087: }
5088: #endif
5089: return(0);
5090: }
5092: /*@
5093: MatScale - Scales all elements of a matrix by a given number.
5095: Logically Collective on Mat
5097: Input Parameters:
5098: + mat - the matrix to be scaled
5099: - a - the scaling value
5101: Output Parameter:
5102: . mat - the scaled matrix
5104: Level: intermediate
5106: Concepts: matrices^scaling all entries
5108: .seealso: MatDiagonalScale()
5109: @*/
5110: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5111: {
5117: if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5118: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5119: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5121: MatCheckPreallocated(mat,1);
5123: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5124: if (a != (PetscScalar)1.0) {
5125: (*mat->ops->scale)(mat,a);
5126: PetscObjectStateIncrease((PetscObject)mat);
5127: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5128: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5129: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5130: }
5131: #endif
5132: }
5133: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5134: return(0);
5135: }
5137: static PetscErrorCode MatNorm_Basic(Mat A,NormType type,PetscReal *nrm)
5138: {
5142: if (type == NORM_1 || type == NORM_INFINITY) {
5143: Vec l,r;
5145: MatCreateVecs(A,&r,&l);
5146: if (type == NORM_INFINITY) {
5147: VecSet(r,1.);
5148: MatMult(A,r,l);
5149: VecNorm(l,NORM_INFINITY,nrm);
5150: } else {
5151: VecSet(l,1.);
5152: MatMultTranspose(A,l,r);
5153: VecNorm(r,NORM_INFINITY,nrm);
5154: }
5155: VecDestroy(&l);
5156: VecDestroy(&r);
5157: } else SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix class %s, norm type %d",((PetscObject)A)->type_name,type);
5158: return(0);
5159: }
5161: /*@
5162: MatNorm - Calculates various norms of a matrix.
5164: Collective on Mat
5166: Input Parameters:
5167: + mat - the matrix
5168: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
5170: Output Parameters:
5171: . nrm - the resulting norm
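   Example of Usage:
   A minimal sketch (assumes mat is an assembled matrix):
.vb
   PetscReal nrm;
   MatNorm(mat,NORM_FROBENIUS,&nrm);
   PetscPrintf(PETSC_COMM_WORLD,"Frobenius norm %g\n",(double)nrm);
.ve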
5173: Level: intermediate
5175: Concepts: matrices^norm
5176: Concepts: norm^of matrix
5177: @*/
5178: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5179: {
5188: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5189: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5190: MatCheckPreallocated(mat,1);
5192: if (!mat->ops->norm) {
5193: MatNorm_Basic(mat,type,nrm);
5194: } else {
5195: (*mat->ops->norm)(mat,type,nrm);
5196: }
5197: return(0);
5198: }
5200: /*
5201: This variable is used to prevent counting of MatAssemblyBegin() that
5202: are called from within a MatAssemblyEnd().
5203: */
5204: static PetscInt MatAssemblyEnd_InUse = 0;
5205: /*@
5206: MatAssemblyBegin - Begins assembling the matrix. This routine should
5207: be called after completing all calls to MatSetValues().
5209: Collective on Mat
5211: Input Parameters:
5212: + mat - the matrix
5213: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5215: Notes:
5216: MatSetValues() generally caches the values. The matrix is ready to
5217: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5218: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5219: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5220: using the matrix.
5222: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5223: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
5224: a global collective operation requiring all processes that share the matrix.
5226: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5227: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5228: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
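   Example of Usage:
   A minimal sketch of the standard insertion/assembly cycle (assumes mat already has its type, sizes and preallocation set; error checking omitted). Work not involving mat may be performed between the Begin and End calls:
.vb
   PetscInt i,rstart,rend;
   MatGetOwnershipRange(mat,&rstart,&rend);
   for (i=rstart; i<rend; i++) {
     MatSetValue(mat,i,i,2.0,INSERT_VALUES);
   }
   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve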
5230: Level: beginner
5232: Concepts: matrices^assembling
5234: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5235: @*/
5236: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5237: {
5243: MatCheckPreallocated(mat,1);
5244: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5245: if (mat->assembled) {
5246: mat->was_assembled = PETSC_TRUE;
5247: mat->assembled = PETSC_FALSE;
5248: }
5249: if (!MatAssemblyEnd_InUse) {
5250: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5251: if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5252: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5253: } else if (mat->ops->assemblybegin) {
5254: (*mat->ops->assemblybegin)(mat,type);
5255: }
5256: return(0);
5257: }
5259: /*@
5260: MatAssembled - Indicates if a matrix has been assembled and is ready for
5261: use; for example, in matrix-vector product.
5263: Not Collective
5265: Input Parameter:
5266: . mat - the matrix
5268: Output Parameter:
5269: . assembled - PETSC_TRUE or PETSC_FALSE
5271: Level: advanced
5273: Concepts: matrices^assembled?
5275: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5276: @*/
5277: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
5278: {
5283: *assembled = mat->assembled;
5284: return(0);
5285: }
5287: /*@
5288: MatAssemblyEnd - Completes assembling the matrix. This routine should
5289: be called after MatAssemblyBegin().
5291: Collective on Mat
5293: Input Parameters:
5294: + mat - the matrix
5295: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5297: Options Database Keys:
5298: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5299: . -mat_view ::ascii_info_detail - Prints more detailed info
5300: . -mat_view - Prints matrix in ASCII format
5301: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
5302: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5303: . -display <name> - Sets display name (default is host)
5304: . -draw_pause <sec> - Sets number of seconds to pause after display
5305: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 12 Using MATLAB with PETSc )
5306: . -viewer_socket_machine <machine> - Machine to use for socket
5307: . -viewer_socket_port <port> - Port number to use for socket
5308: - -mat_view binary:filename[:append] - Save matrix to file in binary format
5310: Notes:
5311: MatSetValues() generally caches the values. The matrix is ready to
5312: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5313: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5314: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5315: using the matrix.
5317: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5318: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5319: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
5321: Level: beginner
5323: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5324: @*/
5325: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5326: {
5327: PetscErrorCode ierr;
5328: static PetscInt inassm = 0;
5329: PetscBool flg = PETSC_FALSE;
5335: inassm++;
5336: MatAssemblyEnd_InUse++;
5337: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5338: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5339: if (mat->ops->assemblyend) {
5340: (*mat->ops->assemblyend)(mat,type);
5341: }
5342: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5343: } else if (mat->ops->assemblyend) {
5344: (*mat->ops->assemblyend)(mat,type);
5345: }
5347: /* Flush assembly is not a true assembly */
5348: if (type != MAT_FLUSH_ASSEMBLY) {
5349: mat->assembled = PETSC_TRUE; mat->num_ass++;
5350: }
5351: mat->insertmode = NOT_SET_VALUES;
5352: MatAssemblyEnd_InUse--;
5353: PetscObjectStateIncrease((PetscObject)mat);
5354: if (!mat->symmetric_eternal) {
5355: mat->symmetric_set = PETSC_FALSE;
5356: mat->hermitian_set = PETSC_FALSE;
5357: mat->structurally_symmetric_set = PETSC_FALSE;
5358: }
5359: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5360: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5361: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5362: }
5363: #endif
5364: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5365: MatViewFromOptions(mat,NULL,"-mat_view");
5367: if (mat->checksymmetryonassembly) {
5368: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5369: if (flg) {
5370: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5371: } else {
5372: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5373: }
5374: }
5375: if (mat->nullsp && mat->checknullspaceonassembly) {
5376: MatNullSpaceTest(mat->nullsp,mat,NULL);
5377: }
5378: }
5379: inassm--;
5380: return(0);
5381: }
5383: /*@
5384: MatSetOption - Sets a parameter option for a matrix. Some options
5385: may be specific to certain storage formats. Some options
5386: determine how values will be inserted (or added). Sorted,
5387: row-oriented input will generally assemble the fastest. The default
5388: is row-oriented.
5390: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5392: Input Parameters:
5393: + mat - the matrix
5394: . option - the option, one of those listed below (and possibly others),
5395: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5397: Options Describing Matrix Structure:
5398: + MAT_SPD - symmetric positive definite
5399: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5400: . MAT_HERMITIAN - the transpose of the matrix is its complex conjugate (the matrix is Hermitian)
5401: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5402: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5403: you set to be kept with all future use of the matrix
5404: including after MatAssemblyBegin/End() which could
5405: potentially change the symmetry structure, i.e. you
5406: KNOW the matrix will ALWAYS have the property you set.
5409: Options For Use with MatSetValues():
5410: Insert a logically dense subblock, which can be
5411: . MAT_ROW_ORIENTED - row-oriented (default)
5413: Note these options reflect the data you pass in with MatSetValues(); they have
5414: nothing to do with how the data is stored internally in the matrix
5415: data structure.
5417: When (re)assembling a matrix, we can restrict the input for
5418: efficiency/debugging purposes. These options include:
5419: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5420: . MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5421: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5422: . MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5423: . MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5424: . MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5425: any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5426: performance for very large process counts.
5427: - MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5428: of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5429: functions, instead sending only neighbor messages.
5431: Notes:
5432: Except for MAT_UNUSED_NONZERO_LOCATION_ERR and MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!
5434: Some options are relevant only for particular matrix types and
5435: are thus ignored by others. Other options are not supported by
5436: certain matrix types and will generate an error message if set.
5438: If using a Fortran 77 module to compute a matrix, one may need to
5439: use the column-oriented option (or convert to the row-oriented
5440: format).
5442: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5443: that would generate a new entry in the nonzero structure is instead
5444: ignored. Thus, if memory has not already been allocated for this particular
5445: data, then the insertion is ignored. For dense matrices, in which
5446: the entire array is allocated, no entries are ever ignored.
5447: Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() process requires one less global reduction
5449: MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5450: that would generate a new entry in the nonzero structure instead produces
5451: an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() process requires one less global reduction
5453: MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5454: that would generate a new entry that has not been preallocated will
5455: instead produce an error. (Currently supported for AIJ and BAIJ formats
5456: only.) This is a useful flag when debugging matrix memory preallocation.
5457: If this option is set then the MatAssemblyBegin/End() process requires one less global reduction
5459: MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5460: other processors should be dropped, rather than stashed.
5461: This is useful if you know that the "owning" processor is also
5462: always generating the correct matrix entries, so that PETSc need
5463: not transfer duplicate entries generated on another processor.
5465: MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5466: searches during matrix assembly. When this flag is set, the hash table
5467: is created during the first matrix assembly. This hash table is
5468: used the next time through, during MatSetValues()/MatSetValuesBlocked()
5469: to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5470: should be used with MAT_USE_HASH_TABLE flag. This option is currently
5471: supported by MATMPIBAIJ format only.
5473: MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5474: are kept in the nonzero structure
5476: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5477: a zero location in the matrix
5479: MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types
5481: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5482: zero row routines and thus improves performance for very large process counts.
5484: MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5485: part of the matrix (since they should match the upper triangular part).
5487: Notes:
5488: Can only be called after MatSetSizes() and MatSetType() have been set.
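   Example of Usage:
   A minimal sketch (assumes mat already has its type and sizes set):
.vb
   MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE);
   MatSetOption(mat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
   MatSetOption(mat,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);
.ve
   The last call makes insertions outside the preallocated pattern an error, which is useful when debugging preallocation.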
5490: Level: intermediate
5492: Concepts: matrices^setting options
5494: .seealso: MatOption, Mat
5496: @*/
5497: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5498: {
5504: if (op > 0) {
5507: }
5509: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Option %d is out of range",(int)op);
5510: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");
5512: switch (op) {
5513: case MAT_NO_OFF_PROC_ENTRIES:
5514: mat->nooffprocentries = flg;
5515: return(0);
5516: break;
5517: case MAT_SUBSET_OFF_PROC_ENTRIES:
5518: mat->subsetoffprocentries = flg;
5519: return(0);
5520: case MAT_NO_OFF_PROC_ZERO_ROWS:
5521: mat->nooffproczerorows = flg;
5522: return(0);
5523: break;
5524: case MAT_SPD:
5525: mat->spd_set = PETSC_TRUE;
5526: mat->spd = flg;
5527: if (flg) {
5528: mat->symmetric = PETSC_TRUE;
5529: mat->structurally_symmetric = PETSC_TRUE;
5530: mat->symmetric_set = PETSC_TRUE;
5531: mat->structurally_symmetric_set = PETSC_TRUE;
5532: }
5533: break;
5534: case MAT_SYMMETRIC:
5535: mat->symmetric = flg;
5536: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5537: mat->symmetric_set = PETSC_TRUE;
5538: mat->structurally_symmetric_set = flg;
5539: #if !defined(PETSC_USE_COMPLEX)
5540: mat->hermitian = flg;
5541: mat->hermitian_set = PETSC_TRUE;
5542: #endif
5543: break;
5544: case MAT_HERMITIAN:
5545: mat->hermitian = flg;
5546: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5547: mat->hermitian_set = PETSC_TRUE;
5548: mat->structurally_symmetric_set = flg;
5549: #if !defined(PETSC_USE_COMPLEX)
5550: mat->symmetric = flg;
5551: mat->symmetric_set = PETSC_TRUE;
5552: #endif
5553: break;
5554: case MAT_STRUCTURALLY_SYMMETRIC:
5555: mat->structurally_symmetric = flg;
5556: mat->structurally_symmetric_set = PETSC_TRUE;
5557: break;
5558: case MAT_SYMMETRY_ETERNAL:
5559: mat->symmetric_eternal = flg;
5560: break;
5561: case MAT_STRUCTURE_ONLY:
5562: mat->structure_only = flg;
5563: break;
5564: default:
5565: break;
5566: }
5567: if (mat->ops->setoption) {
5568: (*mat->ops->setoption)(mat,op,flg);
5569: }
5570: return(0);
5571: }
5573: /*@
5574: MatGetOption - Gets a parameter option that has been set for a matrix.
5576: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5578: Input Parameters:
5579: + mat - the matrix
5580: - option - the option; this only responds to certain options, check the source code for which ones
5582: Output Parameter:
5583: . flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5585: Notes:
5586: Can only be called after MatSetSizes() and MatSetType() have been set.
5588: Level: intermediate
5590: Concepts: matrices^setting options
5592: .seealso: MatOption, MatSetOption()
5594: @*/
5595: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5596: {
5601: if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Option %d is out of range",(int)op);
5602: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");
5604: switch (op) {
5605: case MAT_NO_OFF_PROC_ENTRIES:
5606: *flg = mat->nooffprocentries;
5607: break;
5608: case MAT_NO_OFF_PROC_ZERO_ROWS:
5609: *flg = mat->nooffproczerorows;
5610: break;
5611: case MAT_SYMMETRIC:
5612: *flg = mat->symmetric;
5613: break;
5614: case MAT_HERMITIAN:
5615: *flg = mat->hermitian;
5616: break;
5617: case MAT_STRUCTURALLY_SYMMETRIC:
5618: *flg = mat->structurally_symmetric;
5619: break;
5620: case MAT_SYMMETRY_ETERNAL:
5621: *flg = mat->symmetric_eternal;
5622: break;
5623: case MAT_SPD:
5624: *flg = mat->spd;
5625: break;
5626: default:
5627: break;
5628: }
5629: return(0);
5630: }
5632: /*@
5633: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5634: this routine retains the old nonzero structure.
5636: Logically Collective on Mat
5638: Input Parameters:
5639: . mat - the matrix
5641: Level: intermediate
5643: Notes:
5644: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5645: See the Performance chapter of the users manual for information on preallocating matrices.
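   Example of Usage:
   A minimal sketch that reuses the existing nonzero pattern between nonlinear iterations (assumes mat has been assembled at least once; row, col and val stand for whatever entries are reinserted and are illustrative only):
.vb
   MatZeroEntries(mat);
   MatSetValue(mat,row,col,val,ADD_VALUES);
   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve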
5647: Concepts: matrices^zeroing
5649: .seealso: MatZeroRows()
5650: @*/
5651: PetscErrorCode MatZeroEntries(Mat mat)
5652: {
5658: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5659: if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5660: if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5661: MatCheckPreallocated(mat,1);
5663: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5664: (*mat->ops->zeroentries)(mat);
5665: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5666: PetscObjectStateIncrease((PetscObject)mat);
5667: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5668: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5669: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5670: }
5671: #endif
5672: return(0);
5673: }
5675: /*@
5676: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5677: of a set of rows and columns of a matrix.
5679: Collective on Mat
5681: Input Parameters:
5682: + mat - the matrix
5683: . numRows - the number of rows to remove
5684: . rows - the global row indices
5685: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5686: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5687: - b - optional vector of right hand side, that will be adjusted by provided solution
5689: Notes:
5690: This does not change the nonzero structure of the matrix; it merely zeros those entries in the matrix.
5692: The user can set a value in the diagonal entry (or for the AIJ and
5693: row formats can optionally remove the main diagonal entry from the
5694: nonzero structure as well, by passing 0.0 as the final argument).
5696: For the parallel case, all processes that share the matrix (i.e.,
5697: those in the communicator used for matrix creation) MUST call this
5698: routine, regardless of whether any rows being zeroed are owned by
5699: them.
5701: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5702: list only rows local to itself).
5704: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
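   Example of Usage:
   A minimal sketch applying Dirichlet boundary conditions symmetrically (assumes A, x and b form an assembled linear system, the boundary values have already been placed in x, and nbc/bcrows hold the number and global indices of the constrained rows; these names are illustrative only):
.vb
   MatZeroRowsColumns(A,nbc,bcrows,1.0,x,b);
.ve
   The listed rows and columns are zeroed, 1.0 is placed on the diagonal, and b is adjusted using the values in x so the solution of the modified system still satisfies the boundary conditions.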
5706: Level: intermediate
5708: Concepts: matrices^zeroing rows
5710: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5711: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5712: @*/
5713: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5714: {
5721: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5722: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5723: if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5724: MatCheckPreallocated(mat,1);
5726: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5727: MatViewFromOptions(mat,NULL,"-mat_view");
5728: PetscObjectStateIncrease((PetscObject)mat);
5729: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5730: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5731: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5732: }
5733: #endif
5734: return(0);
5735: }
5737: /*@
5738: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5739: of a set of rows and columns of a matrix.
5741: Collective on Mat
5743: Input Parameters:
5744: + mat - the matrix
5745: . is - the rows to zero
5746: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5747: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5748: - b - optional vector of right hand side, that will be adjusted by provided solution
5750: Notes:
5751: This does not change the nonzero structure of the matrix; it merely zeros those entries in the matrix.
5753: The user can set a value in the diagonal entry (or for the AIJ and
5754: row formats can optionally remove the main diagonal entry from the
5755: nonzero structure as well, by passing 0.0 as the final argument).
5757: For the parallel case, all processes that share the matrix (i.e.,
5758: those in the communicator used for matrix creation) MUST call this
5759: routine, regardless of whether any rows being zeroed are owned by
5760: them.
5762: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5763: list only rows local to itself).
5765: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5767: Level: intermediate
5769: Concepts: matrices^zeroing rows
5771: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5772: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5773: @*/
5774: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5775: {
5777: PetscInt numRows;
5778: const PetscInt *rows;
5785: ISGetLocalSize(is,&numRows);
5786: ISGetIndices(is,&rows);
5787: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5788: ISRestoreIndices(is,&rows);
5789: return(0);
5790: }
5792: /*@
5793: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5794: of a set of rows of a matrix.
5796: Collective on Mat
5798: Input Parameters:
5799: + mat - the matrix
5800: . numRows - the number of rows to remove
5801: . rows - the global row indices
5802: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5803: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5804: - b - optional vector of right hand side, that will be adjusted by provided solution
5806: Notes:
5807: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5808: but does not release memory. For the dense and block diagonal
5809: formats this does not alter the nonzero structure.
5811: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5812: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5813: merely zeroed.
5815: The user can set a value in the diagonal entry (or for the AIJ and
5816: row formats can optionally remove the main diagonal entry from the
5817: nonzero structure as well, by passing 0.0 as the final argument).
5819: For the parallel case, all processes that share the matrix (i.e.,
5820: those in the communicator used for matrix creation) MUST call this
5821: routine, regardless of whether any rows being zeroed are owned by
5822: them.
5824: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5825: list only rows local to itself).
5827: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5828: owns that are to be zeroed. This saves a global synchronization in the implementation.
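   Example of Usage:
   A minimal sketch (assumes mat is assembled and rank is this process's MPI rank; the row indices are illustrative only). Every process must make the call, so processes with nothing to zero pass a count of 0:
.vb
   PetscInt rows[] = {0,1};
   if (!rank) MatZeroRows(mat,2,rows,1.0,NULL,NULL);
   else       MatZeroRows(mat,0,NULL,1.0,NULL,NULL);
.ve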
5830: Level: intermediate
5832: Concepts: matrices^zeroing rows
5834: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5835: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5836: @*/
5837: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5838: {
5845: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5846: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5847: if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5848: MatCheckPreallocated(mat,1);
5850: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5851: MatViewFromOptions(mat,NULL,"-mat_view");
5852: PetscObjectStateIncrease((PetscObject)mat);
5853: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
5854: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
5855: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
5856: }
5857: #endif
5858: return(0);
5859: }
5861: /*@
5862: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5863: of a set of rows of a matrix.
5865: Collective on Mat
5867: Input Parameters:
5868: + mat - the matrix
5869: . is - index set of rows to remove
5870: . diag - value put in all diagonals of eliminated rows
5871: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5872: - b - optional vector of right hand side, that will be adjusted by provided solution
5874: Notes:
5875: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5876: but does not release memory. For the dense and block diagonal
5877: formats this does not alter the nonzero structure.
5879: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5880: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5881: merely zeroed.
5883: The user can set a value in the diagonal entry (or for the AIJ and
5884: row formats can optionally remove the main diagonal entry from the
5885: nonzero structure as well, by passing 0.0 as the final argument).
5887: For the parallel case, all processes that share the matrix (i.e.,
5888: those in the communicator used for matrix creation) MUST call this
5889: routine, regardless of whether any rows being zeroed are owned by
5890: them.
5892: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5893: list only rows local to itself).
5895: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5896: owns that are to be zeroed. This saves a global synchronization in the implementation.
5898: Level: intermediate
5900: Concepts: matrices^zeroing rows
5902: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5903: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5904: @*/
5905: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5906: {
5907: PetscInt numRows;
5908: const PetscInt *rows;
5915: ISGetLocalSize(is,&numRows);
5916: ISGetIndices(is,&rows);
5917: MatZeroRows(mat,numRows,rows,diag,x,b);
5918: ISRestoreIndices(is,&rows);
5919: return(0);
5920: }
5922: /*@
5923: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5924: of a set of rows of a matrix. These rows must be local to the process.
5926: Collective on Mat
5928: Input Parameters:
5929: + mat - the matrix
5930: . numRows - the number of rows to remove
5931: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5932: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5933: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5934: - b - optional vector of right hand side, that will be adjusted by provided solution
5936: Notes:
5937: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5938: but does not release memory. For the dense and block diagonal
5939: formats this does not alter the nonzero structure.
5941: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5942: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5943: merely zeroed.
5945: The user can set a value in the diagonal entry (or for the AIJ and
5946: row formats can optionally remove the main diagonal entry from the
5947: nonzero structure as well, by passing 0.0 as the final argument).
5949: For the parallel case, all processes that share the matrix (i.e.,
5950: those in the communicator used for matrix creation) MUST call this
5951: routine, regardless of whether any rows being zeroed are owned by
5952: them.
5954: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5955: list only rows local to itself).
5957: The grid coordinates are across the entire grid, not just the local portion
5959: In Fortran idxm and idxn should be declared as
5960: $ MatStencil idxm(4,m)
5961: and the values inserted using
5962: $ idxm(MatStencil_i,1) = i
5963: $ idxm(MatStencil_j,1) = j
5964: $ idxm(MatStencil_k,1) = k
5965: $ idxm(MatStencil_c,1) = c
5966: etc
5968: For periodic boundary conditions, use negative indices for values to the left (below 0), which are
5969: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5970: etc., to obtain values that are obtained by wrapping from the left edge. This works only for the
5971: DM_BOUNDARY_PERIODIC boundary type.
5973: For indices that do not mean anything for your case (like the k index when working in 2d, or the c index when you
5974: have a single value per point) you can skip filling those indices.
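   Example of Usage:
   A minimal sketch for a 2d problem with one degree of freedom per point (assumes mat was obtained from DMCreateMatrix() so the stencil information is attached, and i,j are the grid coordinates of a boundary point; names are illustrative only):
.vb
   MatStencil row;
   row.i = i; row.j = j; row.k = 0; row.c = 0;
   MatZeroRowsStencil(mat,1,&row,1.0,NULL,NULL);
.ve
   The unused k and c slots are set to 0 only for clarity; they are ignored for this configuration.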
5976: Level: intermediate
5978: Concepts: matrices^zeroing rows
5980: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRows(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5981: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5982: @*/
5983: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5984: {
5985: PetscInt dim = mat->stencil.dim;
5986: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5987: PetscInt *dims = mat->stencil.dims+1;
5988: PetscInt *starts = mat->stencil.starts;
5989: PetscInt *dxm = (PetscInt*) rows;
5990: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5998: PetscMalloc1(numRows, &jdxm);
5999: for (i = 0; i < numRows; ++i) {
6000: /* Skip unused dimensions (they are ordered k, j, i, c) */
6001: for (j = 0; j < 3-sdim; ++j) dxm++;
6002: /* Local index in X dir */
6003: tmp = *dxm++ - starts[0];
6004: /* Loop over remaining dimensions */
6005: for (j = 0; j < dim-1; ++j) {
6006: /* If nonlocal, set index to be negative */
6007: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6008: /* Update local index */
6009: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6010: }
6011: /* Skip component slot if necessary */
6012: if (mat->stencil.noc) dxm++;
6013: /* Local row number */
6014: if (tmp >= 0) {
6015: jdxm[numNewRows++] = tmp;
6016: }
6017: }
6018: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
6019: PetscFree(jdxm);
6020: return(0);
6021: }
6023: /*@
6024: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
6025: of a set of rows and columns of a matrix.
6027: Collective on Mat
6029: Input Parameters:
6030: + mat - the matrix
6031: . numRows - the number of rows/columns to remove
6032: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
6033: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
6034: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6035: - b - optional vector of right hand side, that will be adjusted by provided solution
6037: Notes:
6038: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
6039: but does not release memory. For the dense and block diagonal
6040: formats this does not alter the nonzero structure.
6042: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6043: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6044: merely zeroed.
6046: The user can set a value in the diagonal entry (or for the AIJ and
6047: row formats can optionally remove the main diagonal entry from the
6048: nonzero structure as well, by passing 0.0 as the final argument).
6050: For the parallel case, all processes that share the matrix (i.e.,
6051: those in the communicator used for matrix creation) MUST call this
6052: routine, regardless of whether any rows being zeroed are owned by
6053: them.
6055: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
6056: list only rows local to itself, but the row/column numbers are given in local numbering).
6058: The grid coordinates are across the entire grid, not just the local portion
6060: In Fortran idxm and idxn should be declared as
6061: $ MatStencil idxm(4,m)
6062: and the values inserted using
6063: $ idxm(MatStencil_i,1) = i
6064: $ idxm(MatStencil_j,1) = j
6065: $ idxm(MatStencil_k,1) = k
6066: $ idxm(MatStencil_c,1) = c
6067: etc
6069: For periodic boundary conditions, use negative indices for values to the left (below 0), which are
6070: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
6071: etc., to obtain values that are obtained by wrapping from the left edge. This works only for the
6072: DM_BOUNDARY_PERIODIC boundary type.
6074: For indices that do not mean anything for your case (like the k index when working in 2d, or the c index when you
6075: have a single value per point) you can skip filling those indices.
6077: Level: intermediate
6079: Concepts: matrices^zeroing rows
6081: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6082: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
6083: @*/
6084: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
6085: {
6086: PetscInt dim = mat->stencil.dim;
6087: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
6088: PetscInt *dims = mat->stencil.dims+1;
6089: PetscInt *starts = mat->stencil.starts;
6090: PetscInt *dxm = (PetscInt*) rows;
6091: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
6099: PetscMalloc1(numRows, &jdxm);
6100: for (i = 0; i < numRows; ++i) {
6101: /* Skip unused dimensions (they are ordered k, j, i, c) */
6102: for (j = 0; j < 3-sdim; ++j) dxm++;
6103: /* Local index in X dir */
6104: tmp = *dxm++ - starts[0];
6105: /* Loop over remaining dimensions */
6106: for (j = 0; j < dim-1; ++j) {
6107: /* If nonlocal, set index to be negative */
6108: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6109: /* Update local index */
6110: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6111: }
6112: /* Skip component slot if necessary */
6113: if (mat->stencil.noc) dxm++;
6114: /* Local row number */
6115: if (tmp >= 0) {
6116: jdxm[numNewRows++] = tmp;
6117: }
6118: }
6119: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6120: PetscFree(jdxm);
6121: return(0);
6122: }
6124: /*@C
6125: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6126: of a set of rows of a matrix; using local numbering of rows.
6128: Collective on Mat
6130: Input Parameters:
6131: + mat - the matrix
6132: . numRows - the number of rows to remove
6133: . rows - the local row indices
6134: . diag - value put in all diagonals of eliminated rows
6135: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6136: - b - optional vector of right hand side, that will be adjusted by provided solution
6138: Notes:
6139: Before calling MatZeroRowsLocal(), the user must first set the
6140: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6142: For the AIJ matrix formats this removes the old nonzero structure,
6143: but does not release memory. For the dense and block diagonal
6144: formats this does not alter the nonzero structure.
6146: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6147: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6148: merely zeroed.
6150: The user can set a value in the diagonal entry (or for the AIJ and
6151: row formats can optionally remove the main diagonal entry from the
6152: nonzero structure as well, by passing 0.0 as the final argument).
6154: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6155: owns that are to be zeroed. This saves a global synchronization in the implementation.
6157: Level: intermediate
6159: Concepts: matrices^zeroing
6161: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6162: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6163: @*/
6164: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6165: {
6172: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6173: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6174: MatCheckPreallocated(mat,1);
6176: if (mat->ops->zerorowslocal) {
6177: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6178: } else {
6179: IS is, newis;
6180: const PetscInt *newRows;
6182: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6183: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6184: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6185: ISGetIndices(newis,&newRows);
6186: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6187: ISRestoreIndices(newis,&newRows);
6188: ISDestroy(&newis);
6189: ISDestroy(&is);
6190: }
6191: PetscObjectStateIncrease((PetscObject)mat);
6192: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
6193: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
6194: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
6195: }
6196: #endif
6197: return(0);
6198: }
6200: /*@
6201: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6202: of a set of rows of a matrix; using local numbering of rows.
6204: Collective on Mat
6206: Input Parameters:
6207: + mat - the matrix
6208: . is - index set of rows to remove
6209: . diag - value put in all diagonals of eliminated rows
6210: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6211: - b - optional vector of right hand side, that will be adjusted by provided solution
6213: Notes:
6214: Before calling MatZeroRowsLocalIS(), the user must first set the
6215: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6217: For the AIJ matrix formats this removes the old nonzero structure,
6218: but does not release memory. For the dense and block diagonal
6219: formats this does not alter the nonzero structure.
6221: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
6222: of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
6223: merely zeroed.
6225: The user can set a value in the diagonal entry (or for the AIJ and
6226: row formats can optionally remove the main diagonal entry from the
6227: nonzero structure as well, by passing 0.0 as the final argument).
6229: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6230: owns that are to be zeroed. This saves a global synchronization in the implementation.
6232: Level: intermediate
6234: Concepts: matrices^zeroing
6236: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6237: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6238: @*/
6239: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6240: {
6242: PetscInt numRows;
6243: const PetscInt *rows;
6249: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6250: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6251: MatCheckPreallocated(mat,1);
6253: ISGetLocalSize(is,&numRows);
6254: ISGetIndices(is,&rows);
6255: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6256: ISRestoreIndices(is,&rows);
6257: return(0);
6258: }
6260: /*@
6261: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6262: of a set of rows and columns of a matrix; using local numbering of rows.
6264: Collective on Mat
6266: Input Parameters:
6267: + mat - the matrix
6268: . numRows - the number of rows to remove
6269: . rows - the local row indices
6270: . diag - value put in all diagonals of eliminated rows
6271: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6272: - b - optional vector of right hand side, that will be adjusted by provided solution
6274: Notes:
6275: Before calling MatZeroRowsColumnsLocal(), the user must first set the
6276: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6278: The user can set a value in the diagonal entry (or for the AIJ and
6279: row formats can optionally remove the main diagonal entry from the
6280: nonzero structure as well, by passing 0.0 as the final argument).
6282: Level: intermediate
6284: Concepts: matrices^zeroing
6286: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6287: MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6288: @*/
6289: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6290: {
6292: IS is, newis;
6293: const PetscInt *newRows;
6299: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6300: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6301: MatCheckPreallocated(mat,1);
6303: if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6304: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6305: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6306: ISGetIndices(newis,&newRows);
6307: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6308: ISRestoreIndices(newis,&newRows);
6309: ISDestroy(&newis);
6310: ISDestroy(&is);
6311: PetscObjectStateIncrease((PetscObject)mat);
6312: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_VECCUDA)
6313: if (mat->valid_GPU_matrix != PETSC_OFFLOAD_UNALLOCATED) {
6314: mat->valid_GPU_matrix = PETSC_OFFLOAD_CPU;
6315: }
6316: #endif
6317: return(0);
6318: }
6320: /*@
6321: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6322: of a set of rows and columns of a matrix; using local numbering of rows.
6324: Collective on Mat
6326: Input Parameters:
6327: + mat - the matrix
6328: . is - index set of rows to remove
6329: . diag - value put in all diagonals of eliminated rows
6330: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6331: - b - optional vector of right hand side, that will be adjusted by provided solution
6333: Notes:
6334: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6335: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6337: The user can set a value in the diagonal entry (or for the AIJ and
6338: row formats can optionally remove the main diagonal entry from the
6339: nonzero structure as well, by passing 0.0 as the final argument).
6341: Level: intermediate
6343: Concepts: matrices^zeroing
6345: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6346: MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6347: @*/
6348: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6349: {
6351: PetscInt numRows;
6352: const PetscInt *rows;
6358: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6359: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6360: MatCheckPreallocated(mat,1);
6362: ISGetLocalSize(is,&numRows);
6363: ISGetIndices(is,&rows);
6364: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6365: ISRestoreIndices(is,&rows);
6366: return(0);
6367: }
6369: /*@C
6370: MatGetSize - Returns the numbers of rows and columns in a matrix.
6372: Not Collective
6374: Input Parameter:
6375: . mat - the matrix
6377: Output Parameters:
6378: + m - the number of global rows
6379: - n - the number of global columns
6381: Note: both output parameters can be NULL on input.
6383: Level: beginner
6385: Concepts: matrices^size
6387: .seealso: MatGetLocalSize()
6388: @*/
6389: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6390: {
6393: if (m) *m = mat->rmap->N;
6394: if (n) *n = mat->cmap->N;
6395: return(0);
6396: }
6398: /*@C
6399: MatGetLocalSize - Returns the number of rows and columns in a matrix
6400: stored locally. This information may be implementation dependent, so
6401: use with care.
6403: Not Collective
6405: Input Parameters:
6406: . mat - the matrix
6408: Output Parameters:
6409: + m - the number of local rows
6410: - n - the number of local columns
6412: Note: both output parameters can be NULL on input.
6414: Level: beginner
6416: Concepts: matrices^local size
6418: .seealso: MatGetSize()
6419: @*/
6420: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6421: {
6426: if (m) *m = mat->rmap->n;
6427: if (n) *n = mat->cmap->n;
6428: return(0);
6429: }
6431: /*@C
6432: MatGetOwnershipRangeColumn - Returns the range of matrix columns owned by this process, that is, the columns associated with the locally
6433: owned portion of a vector that the matrix is multiplied against (the columns of the "diagonal block").
6435: Not Collective, unless matrix has not been allocated, then collective on Mat
6437: Input Parameters:
6438: . mat - the matrix
6440: Output Parameters:
6441: + m - the global index of the first local column
6442: - n - one more than the global index of the last local column
6444: Notes:
6445: both output parameters can be NULL on input.
6447: Level: developer
6449: Concepts: matrices^column ownership
6451: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6453: @*/
6454: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6455: {
6461: MatCheckPreallocated(mat,1);
6462: if (m) *m = mat->cmap->rstart;
6463: if (n) *n = mat->cmap->rend;
6464: return(0);
6465: }
6467: /*@C
6468: MatGetOwnershipRange - Returns the range of matrix rows owned by
6469: this processor, assuming that the matrix is laid out with the first
6470: n1 rows on the first processor, the next n2 rows on the second, etc.
6471: For certain parallel layouts this range may not be well defined.
6473: Not Collective
6475: Input Parameters:
6476: . mat - the matrix
6478: Output Parameters:
6479: + m - the global index of the first local row
6480: - n - one more than the global index of the last local row
6482: Note: Both output parameters can be NULL on input.
6483: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6484: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6485: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
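   Example of Usage:
   A minimal sketch that reports the locally owned row range on every process (assumes mat is preallocated; error checking omitted):
.vb
   PetscInt rstart,rend;
   MatGetOwnershipRange(mat,&rstart,&rend);
   PetscSynchronizedPrintf(PETSC_COMM_WORLD,"rows %D to %D\n",rstart,rend-1);
   PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
.ve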
6487: Level: beginner
6489: Concepts: matrices^row ownership
6491: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6493: @*/
6494: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6495: {
6501: MatCheckPreallocated(mat,1);
6502: if (m) *m = mat->rmap->rstart;
6503: if (n) *n = mat->rmap->rend;
6504: return(0);
6505: }
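/*
   Example of Usage (a minimal sketch; assumes a preallocated Mat A): loop over the locally owned rows and set the diagonal.
.vb
      PetscInt row,rstart,rend;
      MatGetOwnershipRange(A,&rstart,&rend);
      for (row=rstart; row<rend; row++) {
        MatSetValue(A,row,row,1.0,INSERT_VALUES);
      }
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve
*/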
6507: /*@C
6508: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6509: each process
6511: Not Collective, unless matrix has not been allocated, then collective on Mat
6513: Input Parameters:
6514: . mat - the matrix
6516: Output Parameters:
6517: . ranges - start of each process's portion plus one more than the total length at the end
6519: Level: beginner
6521: Concepts: matrices^row ownership
6523: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6525: @*/
6526: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6527: {
6533: MatCheckPreallocated(mat,1);
6534: PetscLayoutGetRanges(mat->rmap,ranges);
6535: return(0);
6536: }
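/*
   Example of Usage (a minimal sketch; assumes an assembled parallel Mat A): print the row range owned by each process.
.vb
      const PetscInt *ranges;
      PetscMPIInt    size,i;
      MatGetOwnershipRanges(A,&ranges);
      MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
      for (i=0; i<size; i++) {
        PetscPrintf(PetscObjectComm((PetscObject)A),"process %d owns rows %D to %D\n",i,ranges[i],ranges[i+1]);
      }
.ve
*/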
6538: /*@C
6539: MatGetOwnershipRangesColumn - Returns the ranges of matrix columns that correspond to the portions of a vector, used in a matrix-vector product, owned by
6540: each process (the columns of the "diagonal blocks" for each process).
6542: Not Collective, unless matrix has not been allocated, then collective on Mat
6544: Input Parameters:
6545: . mat - the matrix
6547: Output Parameters:
6548: . ranges - start of each process's portion plus one more than the total length at the end
6550: Level: beginner
6552: Concepts: matrices^column ownership
6554: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6556: @*/
6557: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6558: {
6564: MatCheckPreallocated(mat,1);
6565: PetscLayoutGetRanges(mat->cmap,ranges);
6566: return(0);
6567: }
6569: /*@C
6570: MatGetOwnershipIS - Get row and column ownership as index sets
6572: Not Collective
6574: Input Parameter:
6575: . A - matrix of type Elemental
6577: Output Parameters:
6578: + rows - rows in which this process owns elements
6579: - cols - columns in which this process owns elements
6581: Level: intermediate
6583: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6584: @*/
6585: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6586: {
6587: PetscErrorCode ierr,(*f)(Mat,IS*,IS*);
6590: MatCheckPreallocated(A,1);
6591: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6592: if (f) {
6593: (*f)(A,rows,cols);
6594: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6595: if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6596: if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6597: }
6598: return(0);
6599: }
6601: /*@C
6602: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6603: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6604: to complete the factorization.
6606: Collective on Mat
6608: Input Parameters:
6609: + mat - the matrix
6610: . row - row permutation
6611: . column - column permutation
6612: - info - structure containing
6613: $ levels - number of levels of fill.
6614: $ expected fill - as ratio of original fill.
6615: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6616: missing diagonal entries)
6618: Output Parameters:
6619: . fact - new matrix that has been symbolically factored
6621: Notes:
6622: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
6624: Most users should employ the simplified KSP interface for linear solvers
6625: instead of working directly with matrix algebra routines such as this.
6626: See, e.g., KSPCreate().
6628: Level: developer
6630: Concepts: matrices^symbolic LU factorization
6631: Concepts: matrices^factorization
6632: Concepts: LU^symbolic factorization
6634: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6635: MatGetOrdering(), MatFactorInfo
6637: Developer Note: the Fortran interface is not autogenerated as the F90
6638: interface definition cannot be generated correctly [due to MatFactorInfo]
6640: @*/
6641: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6642: {
6652: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6653: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6654: if (!(fact)->ops->ilufactorsymbolic) {
6655: MatSolverType spackage;
6656: MatFactorGetSolverType(fact,&spackage);
6657: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6658: }
6659: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6660: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6661: MatCheckPreallocated(mat,2);
6663: PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6664: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6665: PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6666: return(0);
6667: }
6669: /*@C
6670: MatICCFactorSymbolic - Performs symbolic incomplete
6671: Cholesky factorization for a symmetric matrix. Use
6672: MatCholeskyFactorNumeric() to complete the factorization.
6674: Collective on Mat
6676: Input Parameters:
6677: + mat - the matrix
6678: . perm - row and column permutation
6679: - info - structure containing
6680: $ levels - number of levels of fill.
6681: $ expected fill - as ratio of original fill.
6683: Output Parameter:
6684: . fact - the factored matrix
6686: Notes:
6687: Most users should employ the KSP interface for linear solvers
6688: instead of working directly with matrix algebra routines such as this.
6689: See, e.g., KSPCreate().
6691: Level: developer
6693: Concepts: matrices^symbolic incomplete Cholesky factorization
6694: Concepts: matrices^factorization
6695: Concepts: Cholesky^symbolic factorization
6697: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6699: Developer Note: the Fortran interface is not autogenerated as the F90
6700: interface definition cannot be generated correctly [due to MatFactorInfo]
6702: @*/
6703: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6704: {
6713: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6714: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6715: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6716: if (!(fact)->ops->iccfactorsymbolic) {
6717: MatSolverType spackage;
6718: MatFactorGetSolverType(fact,&spackage);
6719: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6720: }
6721: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6722: MatCheckPreallocated(mat,2);
6724: PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6725: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6726: PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6727: return(0);
6728: }
6730: /*@C
6731: MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6732: points to an array of valid matrices, they may be reused to store the new
6733: submatrices.
6735: Collective on Mat
6737: Input Parameters:
6738: + mat - the matrix
6739: . n - the number of submatrices to be extracted (on this processor, may be zero)
6740: . irow, icol - index sets of rows and columns to extract
6741: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6743: Output Parameter:
6744: . submat - the array of submatrices
6746: Notes:
6747: MatCreateSubMatrices() can extract ONLY sequential submatrices
6748: (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6749: to extract a parallel submatrix.
6751: Some matrix types place restrictions on the row and column
6752: indices, such as that they be sorted or that they be equal to each other.
6754: The index sets may not have duplicate entries.
6756: When extracting submatrices from a parallel matrix, each processor can
6757: form a different submatrix by setting the rows and columns of its
6758: individual index sets according to the local submatrix desired.
6760: When finished using the submatrices, the user should destroy
6761: them with MatDestroySubMatrices().
6763: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6764: original matrix has not changed from that last call to MatCreateSubMatrices().
6766: This routine creates the matrices in submat; you should NOT create them before
6767: calling it. It also allocates the array of matrix pointers submat.
6769: For BAIJ matrices the index sets must respect the block structure, that is if they
6770: request one row/column in a block, they must request all rows/columns that are in
6771: that block. For example, if the block size is 2 you cannot request just row 0 and
6772: column 0.
6774: Fortran Note:
6775: The Fortran interface is slightly different from that given below; it
6776: requires one to pass in as submat a Mat (integer) array of size at least n+1.
6778: Level: advanced
6780: Concepts: matrices^accessing submatrices
6781: Concepts: submatrices
6783: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6784: @*/
6785: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6786: {
6788: PetscInt i;
6789: PetscBool eq;
6794: if (n) {
6799: }
6801: if (n && scall == MAT_REUSE_MATRIX) {
6804: }
6805: if (!mat->ops->createsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6806: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6807: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6808: MatCheckPreallocated(mat,1);
6810: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6811: (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6812: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6813: for (i=0; i<n; i++) {
6814: (*submat)[i]->factortype = MAT_FACTOR_NONE; /* in case in place factorization was previously done on submatrix */
6815: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6816: ISEqual(irow[i],icol[i],&eq);
6817: if (eq) {
6818: if (mat->symmetric) {
6819: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6820: } else if (mat->hermitian) {
6821: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6822: } else if (mat->structurally_symmetric) {
6823: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6824: }
6825: }
6826: }
6827: }
6828: return(0);
6829: }
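/*
   Example of Usage (a minimal sketch; assumes an assembled Mat A in which every process owns at least two rows and
   A has at least two columns): each process extracts one sequential submatrix consisting of its first two owned rows
   and the first two columns of A.
.vb
      IS       isrow,iscol;
      Mat      *submats;
      PetscInt rstart,rend;
      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PETSC_COMM_SELF,2,rstart,1,&isrow);
      ISCreateStride(PETSC_COMM_SELF,2,0,1,&iscol);
      MatCreateSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submats);
      ... use the sequential matrix submats[0] ...
      MatDestroySubMatrices(1,&submats);
      ISDestroy(&isrow);
      ISDestroy(&iscol);
.ve
*/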
6831: /*@C
6832: MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).
6834: Collective on Mat
6836: Input Parameters:
6837: + mat - the matrix
6838: . n - the number of submatrices to be extracted
6839: . irow, icol - index sets of rows and columns to extract
6840: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6842: Output Parameter:
6843: . submat - the array of submatrices
6845: Level: advanced
6847: Concepts: matrices^accessing submatrices
6848: Concepts: submatrices
6850: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6851: @*/
6852: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6853: {
6855: PetscInt i;
6856: PetscBool eq;
6861: if (n) {
6866: }
6868: if (n && scall == MAT_REUSE_MATRIX) {
6871: }
6872: if (!mat->ops->createsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6873: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6874: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6875: MatCheckPreallocated(mat,1);
6877: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6878: (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6879: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6880: for (i=0; i<n; i++) {
6881: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6882: ISEqual(irow[i],icol[i],&eq);
6883: if (eq) {
6884: if (mat->symmetric) {
6885: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6886: } else if (mat->hermitian) {
6887: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6888: } else if (mat->structurally_symmetric) {
6889: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6890: }
6891: }
6892: }
6893: }
6894: return(0);
6895: }
6897: /*@C
6898: MatDestroyMatrices - Destroys an array of matrices.
6900: Collective on Mat
6902: Input Parameters:
6903: + n - the number of local matrices
6904: - mat - the matrices (note that this is a pointer to the array of matrices)
6906: Level: advanced
6908: Notes:
6909: Frees not only the matrices, but also the array that contains them.
6910: In Fortran this routine will not free the array.
6912: .seealso: MatCreateSubMatrices() MatDestroySubMatrices()
6913: @*/
6914: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6915: {
6917: PetscInt i;
6920: if (!*mat) return(0);
6921: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6924: for (i=0; i<n; i++) {
6925: MatDestroy(&(*mat)[i]);
6926: }
6928: /* memory is allocated even if n = 0 */
6929: PetscFree(*mat);
6930: return(0);
6931: }
6933: /*@C
6934: MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().
6936: Collective on Mat
6938: Input Parameters:
6939: + n - the number of local matrices
6940: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6941: sequence of MatCreateSubMatrices())
6943: Level: advanced
6945: Notes:
6946: Frees not only the matrices, but also the array that contains them.
6947: In Fortran this routine will not free the array.
6949: .seealso: MatCreateSubMatrices()
6950: @*/
6951: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6952: {
6954: Mat mat0;
6957: if (!*mat) return(0);
6958: /* mat[] is an array of length n+1, see MatCreateSubMatrices_xxx() */
6959: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6962: mat0 = (*mat)[0];
6963: if (mat0 && mat0->ops->destroysubmatrices) {
6964: (mat0->ops->destroysubmatrices)(n,mat);
6965: } else {
6966: MatDestroyMatrices(n,mat);
6967: }
6968: return(0);
6969: }
6971: /*@C
6972: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6974: Collective on Mat
6976: Input Parameters:
6977: . mat - the matrix
6979: Output Parameter:
6980: . matstruct - the sequential matrix with the nonzero structure of mat
6982: Level: intermediate
6984: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6985: @*/
6986: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6987: {
6995: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6996: MatCheckPreallocated(mat,1);
6998: if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6999: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
7000: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
7001: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
7002: return(0);
7003: }
7005: /*@C
7006: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
7008: Collective on Mat
7010: Input Parameters:
7011: . mat - the matrix (note that this is a pointer to the matrix, just to match the calling
7012: sequence of MatGetSeqNonzeroStructure())
7014: Level: advanced
7016: Notes:
7017: Frees the matrix obtained with MatGetSeqNonzeroStructure()
7019: .seealso: MatGetSeqNonzeroStructure()
7020: @*/
7021: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
7022: {
7027: MatDestroy(mat);
7028: return(0);
7029: }
7031: /*@
7032: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
7033: replaces the index sets by larger ones that represent submatrices with
7034: additional overlap.
7036: Collective on Mat
7038: Input Parameters:
7039: + mat - the matrix
7040: . n - the number of index sets
7041: . is - the array of index sets (these index sets will be changed during the call)
7042: - ov - the additional overlap requested
7044: Options Database:
7045: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
7047: Level: developer
7049: Concepts: overlap
7050: Concepts: ASM^computing overlap
7052: .seealso: MatCreateSubMatrices()
7053: @*/
7054: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
7055: {
7061: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7062: if (n) {
7065: }
7066: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7067: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7068: MatCheckPreallocated(mat,1);
7070: if (!ov) return(0);
7071: if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7072: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7073: (*mat->ops->increaseoverlap)(mat,n,is,ov);
7074: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7075: return(0);
7076: }
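/*
   Example of Usage (a minimal sketch; assumes an assembled MPIAIJ Mat A): start from the locally owned rows and
   grow the index set by one level of overlap, as is done for overlapping additive Schwarz.
.vb
      IS       is;
      PetscInt rstart,rend;
      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&is);
      MatIncreaseOverlap(A,1,&is,1);
      ... is now also contains the neighboring rows coupled to the original ones ...
      ISDestroy(&is);
.ve
*/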
7079: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);
7081: /*@
7082: MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
7083: a sub communicator, replaces the index sets by larger ones that represent submatrices with
7084: additional overlap.
7086: Collective on Mat
7088: Input Parameters:
7089: + mat - the matrix
7090: . n - the number of index sets
7091: . is - the array of index sets (these index sets will be changed during the call)
7092: - ov - the additional overlap requested
7094: Options Database:
7095: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
7097: Level: developer
7099: Concepts: overlap
7100: Concepts: ASM^computing overlap
7102: .seealso: MatCreateSubMatrices()
7103: @*/
7104: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
7105: {
7106: PetscInt i;
7112: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
7113: if (n) {
7116: }
7117: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
7118: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7119: MatCheckPreallocated(mat,1);
7120: if (!ov) return(0);
7121: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7122: for(i=0; i<n; i++){
7123: MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
7124: }
7125: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7126: return(0);
7127: }
7132: /*@
7133: MatGetBlockSize - Returns the matrix block size.
7135: Not Collective
7137: Input Parameter:
7138: . mat - the matrix
7140: Output Parameter:
7141: . bs - block size
7143: Notes:
7144: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7146: If the block size has not been set yet this routine returns 1.
7148: Level: intermediate
7150: Concepts: matrices^block size
7152: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7153: @*/
7154: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7155: {
7159: *bs = PetscAbs(mat->rmap->bs);
7160: return(0);
7161: }
7163: /*@
7164: MatGetBlockSizes - Returns the matrix block row and column sizes.
7166: Not Collective
7168: Input Parameter:
7169: . mat - the matrix
7171: Output Parameters:
7172: + rbs - row block size
7173: - cbs - column block size
7175: Notes:
7176: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7177: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7179: If a block size has not been set yet this routine returns 1.
7181: Level: intermediate
7183: Concepts: matrices^block size
7185: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7186: @*/
7187: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7188: {
7193: if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7194: if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7195: return(0);
7196: }
7198: /*@
7199: MatSetBlockSize - Sets the matrix block size.
7201: Logically Collective on Mat
7203: Input Parameters:
7204: + mat - the matrix
7205: - bs - block size
7207: Notes:
7208: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7209: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.
7211: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7212: is compatible with the matrix local sizes.
7214: Level: intermediate
7216: Concepts: matrices^block size
7218: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7219: @*/
7220: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7221: {
7227: MatSetBlockSizes(mat,bs,bs);
7228: return(0);
7229: }
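/*
   Example of Usage (a minimal sketch): create a BAIJ matrix with 4x4 blocks; the block size is set before preallocation.
.vb
      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,120,120);
      MatSetType(A,MATBAIJ);
      MatSetBlockSize(A,4);
      MatSetUp(A);
.ve
*/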
7231: /*@
7232: MatSetVariableBlockSizes - Sets diagonal blocks of the matrix that need not be of the same size
7234: Logically Collective on Mat
7236: Input Parameters:
7237: + mat - the matrix
7238: . nblocks - the number of blocks on this process
7239: - bsizes - the block sizes
7241: Notes:
7242: Currently used by PCVPBJACOBI for SeqAIJ matrices
7244: Level: intermediate
7246: Concepts: matrices^block size
7248: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes(), MatGetVariableBlockSizes()
7249: @*/
7250: PetscErrorCode MatSetVariableBlockSizes(Mat mat,PetscInt nblocks,PetscInt *bsizes)
7251: {
7253: PetscInt i,ncnt = 0, nlocal;
7257: if (nblocks < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Number of local blocks must be greater than or equal to zero");
7258: MatGetLocalSize(mat,&nlocal,NULL);
7259: for (i=0; i<nblocks; i++) ncnt += bsizes[i];
7260: if (ncnt != nlocal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Sum of local block sizes %D does not equal local size of matrix %D",ncnt,nlocal);
7261: PetscFree(mat->bsizes);
7262: mat->nblocks = nblocks;
7263: PetscMalloc1(nblocks,&mat->bsizes);
7264: PetscMemcpy(mat->bsizes,bsizes,nblocks*sizeof(PetscInt));
7265: return(0);
7266: }
7268: /*@C
7269: MatGetVariableBlockSizes - Gets the diagonal blocks of the matrix that need not be of the same size
7271: Logically Collective on Mat
7273: Input Parameters:
7274: . mat - the matrix
7276: Output Parameters:
7277: + nblocks - the number of blocks on this process
7278: - bsizes - the block sizes
7280: Notes: Currently not supported from Fortran
7282: Level: intermediate
7284: Concepts: matrices^block size
7286: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes(), MatSetVariableBlockSizes()
7287: @*/
7288: PetscErrorCode MatGetVariableBlockSizes(Mat mat,PetscInt *nblocks,const PetscInt **bsizes)
7289: {
7292: *nblocks = mat->nblocks;
7293: *bsizes = mat->bsizes;
7294: return(0);
7295: }
7297: /*@
7298: MatSetBlockSizes - Sets the matrix block row and column sizes.
7300: Logically Collective on Mat
7302: Input Parameters:
7303: + mat - the matrix
7304: . rbs - row block size
7305: - cbs - column block size
7307: Notes:
7308: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7309: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7310: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.
7312: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7313: are compatible with the matrix local sizes.
7315: The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().
7317: Level: intermediate
7319: Concepts: matrices^block size
7321: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7322: @*/
7323: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7324: {
7331: if (mat->ops->setblocksizes) {
7332: (*mat->ops->setblocksizes)(mat,rbs,cbs);
7333: }
7334: if (mat->rmap->refcnt) {
7335: ISLocalToGlobalMapping l2g = NULL;
7336: PetscLayout nmap = NULL;
7338: PetscLayoutDuplicate(mat->rmap,&nmap);
7339: if (mat->rmap->mapping) {
7340: ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7341: }
7342: PetscLayoutDestroy(&mat->rmap);
7343: mat->rmap = nmap;
7344: mat->rmap->mapping = l2g;
7345: }
7346: if (mat->cmap->refcnt) {
7347: ISLocalToGlobalMapping l2g = NULL;
7348: PetscLayout nmap = NULL;
7350: PetscLayoutDuplicate(mat->cmap,&nmap);
7351: if (mat->cmap->mapping) {
7352: ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7353: }
7354: PetscLayoutDestroy(&mat->cmap);
7355: mat->cmap = nmap;
7356: mat->cmap->mapping = l2g;
7357: }
7358: PetscLayoutSetBlockSize(mat->rmap,rbs);
7359: PetscLayoutSetBlockSize(mat->cmap,cbs);
7360: return(0);
7361: }
7363: /*@
7364: MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices
7366: Logically Collective on Mat
7368: Input Parameters:
7369: + mat - the matrix
7370: . fromRow - matrix from which to copy row block size
7371: - fromCol - matrix from which to copy column block size (can be same as fromRow)
7373: Level: developer
7375: Concepts: matrices^block size
7377: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7378: @*/
7379: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7380: {
7387: if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7388: if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7389: return(0);
7390: }
7392: /*@
7393: MatResidual - Default routine to calculate the residual.
7395: Collective on Mat and Vec
7397: Input Parameters:
7398: + mat - the matrix
7399: . b - the right-hand-side
7400: - x - the approximate solution
7402: Output Parameter:
7403: . r - location to store the residual
7405: Level: developer
7407: .keywords: MG, default, multigrid, residual
7409: .seealso: PCMGSetResidual()
7410: @*/
7411: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7412: {
7421: MatCheckPreallocated(mat,1);
7422: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7423: if (!mat->ops->residual) {
7424: MatMult(mat,x,r);
7425: VecAYPX(r,-1.0,b);
7426: } else {
7427: (*mat->ops->residual)(mat,b,x,r);
7428: }
7429: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7430: return(0);
7431: }
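/*
   Example of Usage (a minimal sketch; assumes an assembled Mat A and conforming Vecs b and x): compute the
   residual r = b - A*x and its 2-norm.
.vb
      Vec       r;
      PetscReal norm;
      VecDuplicate(b,&r);
      MatResidual(A,b,x,r);
      VecNorm(r,NORM_2,&norm);
      VecDestroy(&r);
.ve
*/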
7433: /*@C
7434: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
7436: Collective on Mat
7438: Input Parameters:
7439: + mat - the matrix
7440: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7441: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7442: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7443: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7444: always used.
7446: Output Parameters:
7447: + n - number of rows in the (possibly compressed) matrix
7448: . ia - the row pointers [of length n+1]
7449: . ja - the column indices
7450: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7451: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
7453: Level: developer
7455: Notes:
7456: You CANNOT change any of the ia[] or ja[] values.
7458: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values.
7460: Fortran Notes:
7461: In Fortran use
7462: $
7463: $ PetscInt ia(1), ja(1)
7464: $ PetscOffset iia, jja
7465: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7466: $ ! Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7468: or
7469: $
7470: $ PetscInt, pointer :: ia(:),ja(:)
7471: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7472: $ ! Access the ith and jth entries via ia(i) and ja(j)
7474: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7475: @*/
7476: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7477: {
7487: MatCheckPreallocated(mat,1);
7488: if (!mat->ops->getrowij) *done = PETSC_FALSE;
7489: else {
7490: *done = PETSC_TRUE;
7491: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7492: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7493: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7494: }
7495: return(0);
7496: }
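/*
   Example of Usage (a minimal sketch; assumes a sequential AIJ Mat A): access the compressed row structure,
   count the nonzeros, and restore the arrays.
.vb
      PetscInt       n,i,nz = 0;
      const PetscInt *ia,*ja;
      PetscBool      done;
      MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
      if (done) {
        for (i=0; i<n; i++) nz += ia[i+1] - ia[i];
      }
      MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
.ve
*/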
7498: /*@C
7499: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
7501: Collective on Mat
7503: Input Parameters:
7504: + mat - the matrix
7505: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7506: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7507: symmetrized
7508: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7509: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7510: always used.
7511: Output Parameters:
7512: + n - number of columns in the (possibly compressed) matrix
7513: . ia - the column pointers
7515: . ja - the row indices
7516: - done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned
7518: Note:
7519: This routine zeros out n, ia, and ja. This is to prevent accidental
7520: use of the array after it has been restored. If you pass NULL, it will
7521: not zero the pointers. Use of ia or ja after MatRestoreColumnIJ() is invalid.
7523: Level: developer
7525: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7526: @*/
7527: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7528: {
7538: MatCheckPreallocated(mat,1);
7539: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7540: else {
7541: *done = PETSC_TRUE;
7542: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7543: }
7544: return(0);
7545: }
7547: /*@C
7548: MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7549: MatGetRowIJ().
7551: Collective on Mat
7553: Input Parameters:
7554: + mat - the matrix
7555: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7556: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7557: symmetrized
7558: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7559: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7560: always used.
7561: . n - size of (possibly compressed) matrix
7562: . ia - the row pointers
7563: - ja - the column indices
7565: Output Parameters:
7566: . done - PETSC_TRUE or PETSC_FALSE indicating that the values have been returned
7568: Note:
7569: This routine zeros out n, ia, and ja. This is to prevent accidental
7570: use of the array after it has been restored. If you pass NULL, it will
7571: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
7573: Level: developer
7575: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7576: @*/
7577: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7578: {
7587: MatCheckPreallocated(mat,1);
7589: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7590: else {
7591: *done = PETSC_TRUE;
7592: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7593: if (n) *n = 0;
7594: if (ia) *ia = NULL;
7595: if (ja) *ja = NULL;
7596: }
7597: return(0);
7598: }
7600: /*@C
7601: MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7602: MatGetColumnIJ().
7604: Collective on Mat
7606: Input Parameters:
7607: + mat - the matrix
7608: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7609: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7610: symmetrized
7611: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7612: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7613: always used.
7615: Output Parameters:
7616: + n - size of (possibly compressed) matrix
7617: . ia - the column pointers
7618: . ja - the row indices
7619: - done - PETSC_TRUE or PETSC_FALSE indicating that the values have been returned
7621: Level: developer
7623: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7624: @*/
7625: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7626: {
7635: MatCheckPreallocated(mat,1);
7637: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7638: else {
7639: *done = PETSC_TRUE;
7640: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7641: if (n) *n = 0;
7642: if (ia) *ia = NULL;
7643: if (ja) *ja = NULL;
7644: }
7645: return(0);
7646: }
7648: /*@C
7649: MatColoringPatch - Used inside matrix coloring routines that
7650: use MatGetRowIJ() and/or MatGetColumnIJ().
7652: Collective on Mat
7654: Input Parameters:
7655: + mat - the matrix
7656: . ncolors - max color value
7657: . n - number of entries in colorarray
7658: - colorarray - array indicating color for each column
7660: Output Parameters:
7661: . iscoloring - coloring generated using colorarray information
7663: Level: developer
7665: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7667: @*/
7668: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7669: {
7677: MatCheckPreallocated(mat,1);
7679: if (!mat->ops->coloringpatch) {
7680: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7681: } else {
7682: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7683: }
7684: return(0);
7685: }
7688: /*@
7689: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7691: Logically Collective on Mat
7693: Input Parameter:
7694: . mat - the factored matrix to be reset
7696: Notes:
7697: This routine should be used only with factored matrices formed by in-place
7698: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7699: format). This option can save memory, for example, when solving nonlinear
7700: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7701: ILU(0) preconditioner.
7703: Note that one can specify in-place ILU(0) factorization by calling
7704: .vb
7705: PCSetType(pc,PCILU);
7706: PCFactorSetUseInPlace(pc,PETSC_TRUE);
7707: .ve
7708: or by using the options -pc_type ilu -pc_factor_in_place
7710: In-place factorization ILU(0) can also be used as a local
7711: solver for the blocks within the block Jacobi or additive Schwarz
7712: methods (runtime option: -sub_pc_factor_in_place). See Users-Manual: ch_pc
7713: for details on setting local solver options.
7715: Most users should employ the simplified KSP interface for linear solvers
7716: instead of working directly with matrix algebra routines such as this.
7717: See, e.g., KSPCreate().
7719: Level: developer
7721: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()
7723: Concepts: matrices^unfactored
7725: @*/
7726: PetscErrorCode MatSetUnfactored(Mat mat)
7727: {
7733: MatCheckPreallocated(mat,1);
7734: mat->factortype = MAT_FACTOR_NONE;
7735: if (!mat->ops->setunfactored) return(0);
7736: (*mat->ops->setunfactored)(mat);
7737: return(0);
7738: }
7740: /*MC
7741: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7743: Synopsis:
7744: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7746: Not collective
7748: Input Parameter:
7749: . x - matrix
7751: Output Parameters:
7752: + xx_v - the Fortran90 pointer to the array
7753: - ierr - error code
7755: Example of Usage:
7756: .vb
7757: PetscScalar, pointer :: xx_v(:,:)
7758: ....
7759: call MatDenseGetArrayF90(x,xx_v,ierr)
7760: a = xx_v(3,1)
7761: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7762: .ve
7764: Level: advanced
7766: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7768: Concepts: matrices^accessing array
7770: M*/
7772: /*MC
7773: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7774: accessed with MatDenseGetArrayF90().
7776: Synopsis:
7777: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7779: Not collective
7781: Input Parameters:
7782: + x - matrix
7783: - xx_v - the Fortran90 pointer to the array
7785: Output Parameter:
7786: . ierr - error code
7788: Example of Usage:
7789: .vb
7790: PetscScalar, pointer :: xx_v(:,:)
7791: ....
7792: call MatDenseGetArrayF90(x,xx_v,ierr)
7793: a = xx_v(3,1)
7794: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7795: .ve
7797: Level: advanced
7799: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7801: M*/
7804: /*MC
7805: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7807: Synopsis:
7808: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7810: Not collective
7812: Input Parameter:
7813: . x - matrix
7815: Output Parameters:
7816: + xx_v - the Fortran90 pointer to the array
7817: - ierr - error code
7819: Example of Usage:
7820: .vb
7821: PetscScalar, pointer :: xx_v(:)
7822: ....
7823: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7824: a = xx_v(3)
7825: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7826: .ve
7828: Level: advanced
7830: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7832: Concepts: matrices^accessing array
7834: M*/
7836: /*MC
7837: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7838: accessed with MatSeqAIJGetArrayF90().
7840: Synopsis:
7841: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7843: Not collective
7845: Input Parameters:
7846: + x - matrix
7847: - xx_v - the Fortran90 pointer to the array
7849: Output Parameter:
7850: . ierr - error code
7852: Example of Usage:
7853: .vb
7854: PetscScalar, pointer :: xx_v(:)
7855: ....
7856: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7857: a = xx_v(3)
7858: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7859: .ve
7861: Level: advanced
7863: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7865: M*/
7868: /*@
7869: MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7870: as the original matrix.
7872: Collective on Mat
7874: Input Parameters:
7875: + mat - the original matrix
7876: . isrow - parallel IS containing the rows this processor should obtain
7877: . iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7878: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7880: Output Parameter:
7881: . newmat - the new submatrix, of the same type as the old
7883: Level: advanced
7885: Notes:
7886: The submatrix will be able to be multiplied with vectors using the same layout as iscol.
7888: Some matrix types place restrictions on the row and column indices, such
7889: as that they be sorted or that they be equal to each other.
7891: The index sets may not have duplicate entries.
7893: The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7894: the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7895: to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7896: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7897: you are finished using it.
7899: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7900: the input matrix.
7902: If iscol is NULL then all columns are obtained (not supported in Fortran).
7904: Example usage:
7905: Consider the following 8x8 matrix with 34 non-zero values, that is
7906: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7907: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7908: as follows:
7910: .vb
7911: 1 2 0 | 0 3 0 | 0 4
7912: Proc0 0 5 6 | 7 0 0 | 8 0
7913: 9 0 10 | 11 0 0 | 12 0
7914: -------------------------------------
7915: 13 0 14 | 15 16 17 | 0 0
7916: Proc1 0 18 0 | 19 20 21 | 0 0
7917: 0 0 0 | 22 23 0 | 24 0
7918: -------------------------------------
7919: Proc2 25 26 27 | 0 0 28 | 29 0
7920: 30 0 0 | 31 32 33 | 0 34
7921: .ve
7923: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7925: .vb
7926: 2 0 | 0 3 0 | 0
7927: Proc0 5 6 | 7 0 0 | 8
7928: -------------------------------
7929: Proc1 18 0 | 19 20 21 | 0
7930: -------------------------------
7931: Proc2 26 27 | 0 0 28 | 29
7932: 0 0 | 31 32 33 | 0
7933: .ve
7936: Concepts: matrices^submatrices
7938: .seealso: MatCreateSubMatrices()
7939: @*/
7940: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7941: {
7943: PetscMPIInt size;
7944: Mat *local;
7945: IS iscoltmp;
7954: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7955: if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");
7957: MatCheckPreallocated(mat,1);
7958: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7960: if (!iscol || isrow == iscol) {
7961: PetscBool stride;
7962: PetscMPIInt grabentirematrix = 0,grab;
7963: PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7964: if (stride) {
7965: PetscInt first,step,n,rstart,rend;
7966: ISStrideGetInfo(isrow,&first,&step);
7967: if (step == 1) {
7968: MatGetOwnershipRange(mat,&rstart,&rend);
7969: if (rstart == first) {
7970: ISGetLocalSize(isrow,&n);
7971: if (n == rend-rstart) {
7972: grabentirematrix = 1;
7973: }
7974: }
7975: }
7976: }
7977: MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7978: if (grab) {
7979: PetscInfo(mat,"Getting entire matrix as submatrix\n");
7980: if (cll == MAT_INITIAL_MATRIX) {
7981: *newmat = mat;
7982: PetscObjectReference((PetscObject)mat);
7983: }
7984: return(0);
7985: }
7986: }
7988: if (!iscol) {
7989: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7990: } else {
7991: iscoltmp = iscol;
7992: }
7994: /* if original matrix is on just one processor then use submatrix generated */
7995: if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7996: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7997: if (!iscol) {ISDestroy(&iscoltmp);}
7998: return(0);
7999: } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
8000: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
8001: *newmat = *local;
8002: PetscFree(local);
8003: if (!iscol) {ISDestroy(&iscoltmp);}
8004: return(0);
8005: } else if (!mat->ops->createsubmatrix) {
8006: /* Create a new matrix type that implements the operation using the full matrix */
8007: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
8008: switch (cll) {
8009: case MAT_INITIAL_MATRIX:
8010: MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
8011: break;
8012: case MAT_REUSE_MATRIX:
8013: MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
8014: break;
8015: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
8016: }
8017: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
8018: if (!iscol) {ISDestroy(&iscoltmp);}
8019: return(0);
8020: }
8022: if (!mat->ops->createsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8023: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
8024: (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
8025: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
8027: /* Propagate symmetry information for diagonal blocks */
8028: if (isrow == iscoltmp) {
8029: if (mat->symmetric_set && mat->symmetric) {
8030: MatSetOption(*newmat,MAT_SYMMETRIC,PETSC_TRUE);
8031: }
8032: if (mat->structurally_symmetric_set && mat->structurally_symmetric) {
8033: MatSetOption(*newmat,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
8034: }
8035: if (mat->hermitian_set && mat->hermitian) {
8036: MatSetOption(*newmat,MAT_HERMITIAN,PETSC_TRUE);
8037: }
8038: if (mat->spd_set && mat->spd) {
8039: MatSetOption(*newmat,MAT_SPD,PETSC_TRUE);
8040: }
8041: }
8043: if (!iscol) {ISDestroy(&iscoltmp);}
8044: if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
8045: return(0);
8046: }
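/*
   Example of Usage (a minimal sketch; assumes an assembled parallel Mat A): each process keeps the first half of its
   locally owned rows; passing NULL for iscol keeps every column.
.vb
      IS       isrow;
      Mat      B;
      PetscInt rstart,rend;
      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PetscObjectComm((PetscObject)A),(rend-rstart)/2,rstart,1,&isrow);
      MatCreateSubMatrix(A,isrow,NULL,MAT_INITIAL_MATRIX,&B);
      ISDestroy(&isrow);
      ... use B, then destroy it with MatDestroy(&B) ...
.ve
*/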
8048: /*@
8049: MatStashSetInitialSize - sets the sizes of the matrix stash, that is
8050: used during the assembly process to store values that belong to
8051: other processors.
8053: Not Collective
8055: Input Parameters:
8056: + mat - the matrix
8057: . size - the initial size of the stash.
8058: - bsize - the initial size of the block-stash (if used).
8060: Options Database Keys:
8061: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
8062: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
8064: Level: intermediate
8066: Notes:
8067: The block-stash is used for values set with MatSetValuesBlocked() while
8068: the stash is used for values set with MatSetValues()
8070: Run with the option -info and look for output of the form
8071: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
8072: to determine the appropriate value, MM, to use for size and
8073: MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
8074: to determine the value, BMM, to use for bsize.
8076: Concepts: stash^setting matrix size
8077: Concepts: matrices^stash
8079: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
8081: @*/
8082: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
8083: {
8089: MatStashSetInitialSize_Private(&mat->stash,size);
8090: MatStashSetInitialSize_Private(&mat->bstash,bsize);
8091: return(0);
8092: }
8094: /*@
8095: MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
8096: the matrix
8098: Neighbor-wise Collective on Mat
8100: Input Parameters:
8101: + mat - the matrix
8102: . x,y - the vectors
8103: - w - where the result is stored
8105: Level: intermediate
8107: Notes:
8108: w may be the same vector as y.
8110: This allows one to use either the restriction or interpolation (its transpose)
8111: matrix to do the interpolation
8113: Concepts: interpolation
8115: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
8117: @*/
8118: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
8119: {
8121: PetscInt M,N,Ny;
8129: MatCheckPreallocated(A,1);
8130: MatGetSize(A,&M,&N);
8131: VecGetSize(y,&Ny);
8132: if (M == Ny) {
8133: MatMultAdd(A,x,y,w);
8134: } else {
8135: MatMultTransposeAdd(A,x,y,w);
8136: }
8137: return(0);
8138: }
8140: /*@
8141: MatInterpolate - y = A*x or A'*x depending on the shape of
8142: the matrix
8144: Neighbor-wise Collective on Mat
8146: Input Parameters:
8147: + mat - the matrix
8148: - x,y - the vectors
8150: Level: intermediate
8152: Notes:
8153: This allows one to use either the restriction or interpolation (its transpose)
8154: matrix to do the interpolation
8156: Concepts: matrices^interpolation
8158: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
8160: @*/
8161: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
8162: {
8164: PetscInt M,N,Ny;
8171: MatCheckPreallocated(A,1);
8172: MatGetSize(A,&M,&N);
8173: VecGetSize(y,&Ny);
8174: if (M == Ny) {
8175: MatMult(A,x,y);
8176: } else {
8177: MatMultTranspose(A,x,y);
8178: }
8179: return(0);
8180: }
8182: /*@
8183: MatRestrict - y = A*x or A'*x
8185: Neighbor-wise Collective on Mat
8187: Input Parameters:
8188: + mat - the matrix
8189: - x,y - the vectors
8191: Level: intermediate
8193: Notes:
8194: This allows one to use either the restriction or interpolation (its transpose)
8195: matrix to do the restriction
8197: Concepts: matrices^restriction
8199: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
8201: @*/
8202: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
8203: {
8205: PetscInt M,N,Ny;
8212: MatCheckPreallocated(A,1);
8214: MatGetSize(A,&M,&N);
8215: VecGetSize(y,&Ny);
8216: if (M == Ny) {
8217: MatMult(A,x,y);
8218: } else {
8219: MatMultTranspose(A,x,y);
8220: }
8221: return(0);
8222: }
8224: /*@
8225: MatGetNullSpace - retrieves the null space of a matrix.
8227: Logically Collective on Mat and MatNullSpace
8229: Input Parameter:
8230: . mat - the matrix
8231: Output Parameter: nullsp - the null space object
8233: Level: developer
8235: Concepts: null space^attaching to matrix
8237: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8238: @*/
8239: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8240: {
8244: *nullsp = (mat->symmetric_set && mat->symmetric && !mat->nullsp) ? mat->transnullsp : mat->nullsp;
8245: return(0);
8246: }
8248: /*@
8249: MatSetNullSpace - attaches a null space to a matrix.
8251: Logically Collective on Mat and MatNullSpace
8253: Input Parameters:
8254: + mat - the matrix
8255: - nullsp - the null space object
8257: Level: advanced
8259: Notes:
8260: This null space is used by the linear solvers. Overwrites any previous null space that may have been attached
8262: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8263: call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.
8265: You can remove the null space by calling this routine with a nullsp of NULL
8268: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8269: the domain of a matrix A (from R^n to R^m, with m rows and n columns) satisfies R^n = the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8270: Similarly R^m = the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or correspondingly b is orthogonal to
8271: n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
8272: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized to n(A^T).
8274: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
8276: If the matrix is known to be symmetric because it is an SBAIJ matrix or one has called MatSetOption(mat,MAT_SYMMETRIC or MAT_SYMMETRIC_ETERNAL,PETSC_TRUE); this
8277: routine also automatically calls MatSetTransposeNullSpace().
8279: Concepts: null space^attaching to matrix
8281: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8282: @*/
8283: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8284: {
8290: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8291: MatNullSpaceDestroy(&mat->nullsp);
8292: mat->nullsp = nullsp;
8293: if (mat->symmetric_set && mat->symmetric) {
8294: MatSetTransposeNullSpace(mat,nullsp);
8295: }
8296: return(0);
8297: }
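/*
   Example of Usage (a minimal sketch; assumes an assembled singular Mat A whose null space is the constant vector,
   e.g. a pure Neumann Laplacian):
.vb
      MatNullSpace nullsp;
      MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,0,NULL,&nullsp);
      MatSetNullSpace(A,nullsp);
      MatNullSpaceDestroy(&nullsp);
.ve
   The KSP solvers will then remove the constant component so that the singular system can be solved consistently.
*/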
8299: /*@
8300: MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.
8302: Logically Collective on Mat and MatNullSpace
8304: Input Parameter:
8305: . mat - the matrix
8306: Output Parameter: nullsp - the null space object
8308: Level: developer
8310: Concepts: null space^attaching to matrix
8312: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8313: @*/
8314: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8315: {
8320: *nullsp = (mat->symmetric_set && mat->symmetric && !mat->transnullsp) ? mat->nullsp : mat->transnullsp;
8321: return(0);
8322: }
8324: /*@
8325: MatSetTransposeNullSpace - attaches a null space to a matrix.
8327: Logically Collective on Mat and MatNullSpace
8329: Input Parameters:
8330: + mat - the matrix
8331: - nullsp - the null space object
8333: Level: advanced
8335: Notes:
8336: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8337: You must also call MatSetNullSpace().
8340: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8341: the domain of a matrix A (from R^n to R^m, with m rows and n columns) satisfies R^n = the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8342: Similarly R^m = the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or correspondingly b is orthogonal to
8343: n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
8344: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized to n(A^T).
8346: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
8348: Concepts: null space^attaching to matrix
8350: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8351: @*/
8352: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8353: {
8359: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8360: MatNullSpaceDestroy(&mat->transnullsp);
8361: mat->transnullsp = nullsp;
8362: return(0);
8363: }
8365: /*@
8366: MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions.
8367: This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
8369: Logically Collective on Mat and MatNullSpace
8371: Input Parameters:
8372: + mat - the matrix
8373: - nullsp - the null space object
8375: Level: advanced
8377: Notes:
8378: Overwrites any previous near null space that may have been attached.
8380: You can remove the near null space by calling this routine with a nullsp of NULL.
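   Example of Usage (a minimal sketch for a 2D or 3D elasticity problem, assuming a vector coords of nodal coordinates with the appropriate block size already exists; error checking omitted):
.vb
      MatNullSpaceCreateRigidBody(coords,&nearnullsp);
      MatSetNearNullSpace(A,nearnullsp);
      MatNullSpaceDestroy(&nearnullsp);
.ve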
8382: Concepts: null space^attaching to matrix
8384: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8385: @*/
8386: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8387: {
8394: MatCheckPreallocated(mat,1);
8395: if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8396: MatNullSpaceDestroy(&mat->nearnullsp);
8397: mat->nearnullsp = nullsp;
8398: return(0);
8399: }
8401: /*@
8402: MatGetNearNullSpace - Gets the near null space attached with MatSetNearNullSpace()
8404: Not Collective
8406: Input Parameters:
8407: . mat - the matrix
8409: Output Parameters:
8410: . nullsp - the null space object, NULL if not set
8412: Level: developer
8414: Concepts: null space^attaching to matrix
8416: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8417: @*/
8418: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8419: {
8424: MatCheckPreallocated(mat,1);
8425: *nullsp = mat->nearnullsp;
8426: return(0);
8427: }
8429: /*@C
8430: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
8432: Collective on Mat
8434: Input Parameters:
8435: + mat - the matrix
8436: . row - row/column permutation
8437: - info - options for the factorization, such as the level of fill for ICC(k) and the expected fill factor (>= 1.0); see MatFactorInfo
8440: Notes:
8441: Probably really in-place only when level of fill is zero, otherwise allocates
8442: new space to store factored matrix and deletes previous memory.
8444: Most users should employ the simplified KSP interface for linear solvers
8445: instead of working directly with matrix algebra routines such as this.
8446: See, e.g., KSPCreate().
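   Example of Usage (a minimal sketch of an in-place ICC(0) factorization, assuming the assembled symmetric matrix mat supports this operation; error checking omitted):
.vb
      MatFactorInfo info;
      IS            row,col;

      MatFactorInfoInitialize(&info);
      info.levels = 0;
      info.fill   = 1.0;
      MatGetOrdering(mat,MATORDERINGNATURAL,&row,&col);
      MatICCFactor(mat,row,&info);
      ISDestroy(&row);
      ISDestroy(&col);
.ve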
8448: Level: developer
8450: Concepts: matrices^incomplete Cholesky factorization
8451: Concepts: Cholesky factorization
8453: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
8455: Developer Note: the Fortran interface is not autogenerated as the f90
8456: interface definition cannot be generated correctly [due to MatFactorInfo]
8458: @*/
8459: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8460: {
8468: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8469: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8470: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8471: if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8472: MatCheckPreallocated(mat,1);
8473: (*mat->ops->iccfactor)(mat,row,info);
8474: PetscObjectStateIncrease((PetscObject)mat);
8475: return(0);
8476: }
8478: /*@
8479: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8480: ghosted ones.
8482: Not Collective
8484: Input Parameters:
8485: + mat - the matrix
8486: - diag - the diagonal values, including ghost ones
8488: Level: developer
8490: Notes:
8491: Works only for MPIAIJ and MPIBAIJ matrices
8493: .seealso: MatDiagonalScale()
8494: @*/
8495: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8496: {
8498: PetscMPIInt size;
8505: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8506: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8507: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8508: if (size == 1) {
8509: PetscInt n,m;
8510: VecGetSize(diag,&n);
8511: MatGetSize(mat,0,&m);
8512: if (m == n) {
8513: MatDiagonalScale(mat,0,diag);
8514: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8515: } else {
8516: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8517: }
8518: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8519: PetscObjectStateIncrease((PetscObject)mat);
8520: return(0);
8521: }
8523: /*@
8524: MatGetInertia - Gets the inertia from a factored matrix
8526: Collective on Mat
8528: Input Parameter:
8529: . mat - the matrix
8531: Output Parameters:
8532: + nneg - number of negative eigenvalues
8533: . nzero - number of zero eigenvalues
8534: - npos - number of positive eigenvalues
8536: Level: advanced
8538: Notes:
8539: Matrix must have been factored by MatCholeskyFactor()
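   Example of Usage (a minimal sketch, assuming a symmetric matrix A and a solver that supports inertia computation, such as MUMPS; error checking omitted):
.vb
      Mat           F;
      MatFactorInfo info;
      IS            perm,iperm;
      PetscInt      nneg,nzero,npos;

      MatFactorInfoInitialize(&info);
      MatGetOrdering(A,MATORDERINGNATURAL,&perm,&iperm);
      MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_CHOLESKY,&F);
      MatCholeskyFactorSymbolic(F,A,perm,&info);
      MatCholeskyFactorNumeric(F,A,&info);
      MatGetInertia(F,&nneg,&nzero,&npos);
      ISDestroy(&perm); ISDestroy(&iperm); MatDestroy(&F);
.ve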
8542: @*/
8543: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8544: {
8550: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8551: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8552: if (!mat->ops->getinertia) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8553: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8554: return(0);
8555: }
8557: /* ----------------------------------------------------------------*/
8558: /*@C
8559: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
8561: Neighbor-wise Collective on Mat and Vecs
8563: Input Parameters:
8564: + mat - the factored matrix
8565: - b - the right-hand-side vectors
8567: Output Parameter:
8568: . x - the result vectors
8570: Notes:
8571: The vectors b and x cannot be the same. I.e., one cannot
8572: call MatSolves(A,x,x).
8575: Most users should employ the simplified KSP interface for linear solvers
8576: instead of working directly with matrix algebra routines such as this.
8577: See, e.g., KSPCreate().
8579: Level: developer
8581: Concepts: matrices^triangular solves
8583: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8584: @*/
8585: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8586: {
8592: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8593: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8594: if (!mat->rmap->N && !mat->cmap->N) return(0);
8596: if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8597: MatCheckPreallocated(mat,1);
8598: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8599: (*mat->ops->solves)(mat,b,x);
8600: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8601: return(0);
8602: }
8604: /*@
8605: MatIsSymmetric - Test whether a matrix is symmetric
8607: Collective on Mat
8609: Input Parameters:
8610: + A - the matrix to test
8611: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)
8613: Output Parameter:
8614: . flg - the result
8616: Notes:
8617: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
8619: Level: intermediate
8621: Concepts: matrix^symmetry
8623: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8624: @*/
8625: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
8626: {
8633: if (!A->symmetric_set) {
8634: if (!A->ops->issymmetric) {
8635: MatType mattype;
8636: MatGetType(A,&mattype);
8637: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8638: }
8639: (*A->ops->issymmetric)(A,tol,flg);
8640: if (!tol) {
8641: A->symmetric_set = PETSC_TRUE;
8642: A->symmetric = *flg;
8643: if (A->symmetric) {
8644: A->structurally_symmetric_set = PETSC_TRUE;
8645: A->structurally_symmetric = PETSC_TRUE;
8646: }
8647: }
8648: } else if (A->symmetric) {
8649: *flg = PETSC_TRUE;
8650: } else if (!tol) {
8651: *flg = PETSC_FALSE;
8652: } else {
8653: if (!A->ops->issymmetric) {
8654: MatType mattype;
8655: MatGetType(A,&mattype);
8656: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8657: }
8658: (*A->ops->issymmetric)(A,tol,flg);
8659: }
8660: return(0);
8661: }
8663: /*@
8664: MatIsHermitian - Test whether a matrix is Hermitian
8666: Collective on Mat
8668: Input Parameters:
8669: + A - the matrix to test
8670: - tol - difference between value and its conjugate transpose less than this amount counts as equal (use 0.0 for exact Hermitian)
8672: Output Parameter:
8673: . flg - the result
8675: Level: intermediate
8677: Concepts: matrix^symmetry
8679: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8680: MatIsSymmetricKnown(), MatIsSymmetric()
8681: @*/
8682: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8683: {
8690: if (!A->hermitian_set) {
8691: if (!A->ops->ishermitian) {
8692: MatType mattype;
8693: MatGetType(A,&mattype);
8694: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8695: }
8696: (*A->ops->ishermitian)(A,tol,flg);
8697: if (!tol) {
8698: A->hermitian_set = PETSC_TRUE;
8699: A->hermitian = *flg;
8700: if (A->hermitian) {
8701: A->structurally_symmetric_set = PETSC_TRUE;
8702: A->structurally_symmetric = PETSC_TRUE;
8703: }
8704: }
8705: } else if (A->hermitian) {
8706: *flg = PETSC_TRUE;
8707: } else if (!tol) {
8708: *flg = PETSC_FALSE;
8709: } else {
8710: if (!A->ops->ishermitian) {
8711: MatType mattype;
8712: MatGetType(A,&mattype);
8713: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8714: }
8715: (*A->ops->ishermitian)(A,tol,flg);
8716: }
8717: return(0);
8718: }
8720: /*@
8721: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8723: Not Collective
8725: Input Parameter:
8726: . A - the matrix to check
8728: Output Parameters:
8729: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8730: - flg - the result
8732: Level: advanced
8734: Concepts: matrix^symmetry
8736: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8737: if you want it explicitly checked
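   Example of Usage (a minimal sketch of the typical set/flg pattern; the matrix A is assumed to exist):
.vb
      PetscBool set,flg;

      MatIsSymmetricKnown(A,&set,&flg);
      if (!set) {                       /* symmetry flag not set; check the values explicitly */
        MatIsSymmetric(A,0.0,&flg);
      }
      if (flg) { /* the matrix is symmetric */ }
.ve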
8739: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8740: @*/
8741: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8742: {
8747: if (A->symmetric_set) {
8748: *set = PETSC_TRUE;
8749: *flg = A->symmetric;
8750: } else {
8751: *set = PETSC_FALSE;
8752: }
8753: return(0);
8754: }
8756: /*@
8757: MatIsHermitianKnown - Checks the flag on the matrix to see if it is Hermitian.
8759: Not Collective
8761: Input Parameter:
8762: . A - the matrix to check
8764: Output Parameters:
8765: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8766: - flg - the result
8768: Level: advanced
8770: Concepts: matrix^symmetry
8772: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8773: if you want it explicitly checked
8775: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8776: @*/
8777: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8778: {
8783: if (A->hermitian_set) {
8784: *set = PETSC_TRUE;
8785: *flg = A->hermitian;
8786: } else {
8787: *set = PETSC_FALSE;
8788: }
8789: return(0);
8790: }
8792: /*@
8793: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8795: Collective on Mat
8797: Input Parameter:
8798: . A - the matrix to test
8800: Output Parameter:
8801: . flg - the result
8803: Level: intermediate
8805: Concepts: matrix^symmetry
8807: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8808: @*/
8809: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8810: {
8816: if (!A->structurally_symmetric_set) {
8817: if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetry");
8818: (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);
8820: A->structurally_symmetric_set = PETSC_TRUE;
8821: }
8822: *flg = A->structurally_symmetric;
8823: return(0);
8824: }
8826: /*@
8827: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8828: to be communicated to other processors during the MatAssemblyBegin/End() process
8830: Not collective
8832: Input Parameter:
8833: . mat - the matrix
8835: Output Parameters:
8836: + nstash - the size of the stash
8837: . reallocs - the number of additional mallocs incurred
8838: . bnstash - the size of the block stash
8839: - breallocs - the number of additional mallocs incurred in the block stash
8841: Level: advanced
8843: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8845: @*/
8846: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8847: {
8851: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8852: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8853: return(0);
8854: }
8856: /*@C
8857: MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8858: parallel layout
8860: Collective on Mat
8862: Input Parameter:
8863: . mat - the matrix
8865: Output Parameters:
8866: + right - (optional) vector that the matrix can be multiplied against
8867: - left - (optional) vector that the matrix vector product can be stored in
8869: Notes:
8870: The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().
8873: These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed.
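   Example of Usage (a minimal sketch, assuming the assembled matrix A already exists; error checking omitted):
.vb
      Vec right,left;

      MatCreateVecs(A,&right,&left);
      VecSet(right,1.0);
      MatMult(A,right,left);        /* left = A*right, with layouts compatible with A */
      VecDestroy(&right);
      VecDestroy(&left);
.ve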
8875: Level: advanced
8877: .seealso: MatCreate(), VecDestroy()
8878: @*/
8879: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8880: {
8886: if (mat->ops->getvecs) {
8887: (*mat->ops->getvecs)(mat,right,left);
8888: } else {
8889: PetscInt rbs,cbs;
8890: MatGetBlockSizes(mat,&rbs,&cbs);
8891: if (right) {
8892: if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8893: VecCreate(PetscObjectComm((PetscObject)mat),right);
8894: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8895: VecSetBlockSize(*right,cbs);
8896: VecSetType(*right,mat->defaultvectype);
8897: PetscLayoutReference(mat->cmap,&(*right)->map);
8898: }
8899: if (left) {
8900: if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8901: VecCreate(PetscObjectComm((PetscObject)mat),left);
8902: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8903: VecSetBlockSize(*left,rbs);
8904: VecSetType(*left,mat->defaultvectype);
8905: PetscLayoutReference(mat->rmap,&(*left)->map);
8906: }
8907: }
8908: return(0);
8909: }
8911: /*@C
8912: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8913: with default values.
8915: Not Collective
8917: Input Parameter:
8918: . info - the MatFactorInfo data structure
8921: Notes:
8922: The solvers are generally used through the KSP and PC objects, for example
8923: PCLU, PCILU, PCCHOLESKY, PCICC
8925: Level: developer
8927: .seealso: MatFactorInfo
8929: Developer Note: the Fortran interface is not autogenerated as the f90
8930: interface definition cannot be generated correctly [due to MatFactorInfo]
8932: @*/
8934: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8935: {
8939: PetscMemzero(info,sizeof(MatFactorInfo));
8940: return(0);
8941: }
8943: /*@
8944: MatFactorSetSchurIS - Set indices corresponding to the Schur complement you wish to have computed
8946: Collective on Mat
8948: Input Parameters:
8949: + mat - the factored matrix
8950: - is - the index set defining the Schur indices (0-based)
8952: Notes:
8953: Call MatFactorSolveSchurComplement() or MatFactorSolveSchurComplementTranspose() after this call to solve a Schur complement system.
8955: You can call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() after this call.
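   Example of Usage (a minimal sketch of the Schur complement workflow, assuming a matrix A, an index set is of Schur indices, and a PETSc build with MUMPS; error checking omitted):
.vb
      Mat           F,S;
      MatFactorInfo info;
      IS            rowperm,colperm;

      MatFactorInfoInitialize(&info);
      MatGetOrdering(A,MATORDERINGNATURAL,&rowperm,&colperm);
      MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
      MatFactorSetSchurIS(F,is);
      MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
      MatLUFactorNumeric(F,A,&info);
      MatFactorCreateSchurComplement(F,&S,NULL);   /* copy of the Schur complement */
      MatDestroy(&S); MatDestroy(&F);
      ISDestroy(&rowperm); ISDestroy(&colperm);
.ve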
8957: Level: developer
8961: .seealso: MatGetFactor(), MatFactorGetSchurComplement(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSolveSchurComplement(),
8962:           MatFactorSolveSchurComplementTranspose()
8964: @*/
8965: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8966: {
8967: PetscErrorCode ierr,(*f)(Mat,IS);
8975: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8976: PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8977: if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverType does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8978: if (mat->schur) {
8979: MatDestroy(&mat->schur);
8980: }
8981: (*f)(mat,is);
8982: if (!mat->schur) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_PLIB,"Schur complement has not been created");
8983: MatFactorSetUpInPlaceSchur_Private(mat);
8984: return(0);
8985: }
8987: /*@
8988: MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step
8990: Logically Collective on Mat
8992: Input Parameters:
8993: + F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8994: . S - location where to return the Schur complement, can be NULL
8995: - status - the status of the Schur complement matrix, can be NULL
8997: Notes:
8998: You must call MatFactorSetSchurIS() before calling this routine.
9000: The routine provides a copy of the Schur matrix stored within the solver data structures.
9001: The caller must destroy the object when it is no longer needed.
9002: If MatFactorInvertSchurComplement() has been called, the routine gets back the inverse.
9004: Use MatFactorGetSchurComplement() to get access to the Schur complement matrix inside the factored matrix instead of making a copy of it (which this function does)
9006: Developer Notes:
9007: The reason this routine exists is because the representation of the Schur complement within the factor matrix may be different than a standard PETSc
9008: matrix representation and we normally do not want to use the time or memory to make a copy as a regular PETSc matrix.
9010: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
9012: Level: advanced
9014: References:
9016: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
9017: @*/
9018: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
9019: {
9026: if (S) {
9027: PetscErrorCode (*f)(Mat,Mat*);
9029: PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
9030: if (f) {
9031: (*f)(F,S);
9032: } else {
9033: MatDuplicate(F->schur,MAT_COPY_VALUES,S);
9034: }
9035: }
9036: if (status) *status = F->schur_status;
9037: return(0);
9038: }
9040: /*@
9041: MatFactorGetSchurComplement - Gets access to a Schur complement matrix using the current Schur data within a factored matrix
9043: Logically Collective on Mat
9045: Input Parameters:
9046: + F - the factored matrix obtained by calling MatGetFactor()
9047: . S - location where to return the Schur complement, can be NULL
9048: - status - the status of the Schur complement matrix, can be NULL
9050: Notes:
9051: You must call MatFactorSetSchurIS() before calling this routine.
9053: Schur complement mode is currently implemented for sequential matrices.
9054: The routine returns the Schur complement stored within the data structures of the solver.
9055: If MatFactorInvertSchurComplement() has previously been called, the returned matrix is actually the inverse of the Schur complement.
9056: The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.
9058: Use MatFactorCreateSchurComplement() to create a copy of the Schur complement matrix that is within a factored matrix
9060: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
9062: Level: advanced
9064: References:
9066: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
9067: @*/
9068: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
9069: {
9074: if (S) *S = F->schur;
9075: if (status) *status = F->schur_status;
9076: return(0);
9077: }
9079: /*@
9080: MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement
9082: Logically Collective on Mat
9084: Input Parameters:
9085: + F - the factored matrix obtained by calling MatGetFactor()
9086: . S - location where the Schur complement is stored
9087: - status - the status of the Schur complement matrix (see MatFactorSchurStatus)
9089: Notes:
9091: Level: advanced
9093: References:
9095: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
9096: @*/
9097: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
9098: {
9103: if (S) {
9105: *S = NULL;
9106: }
9107: F->schur_status = status;
9108: MatFactorUpdateSchurStatus_Private(F);
9109: return(0);
9110: }
9112: /*@
9113: MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step
9115: Logically Collective on Mat
9117: Input Parameters:
9118: + F - the factored matrix obtained by calling MatGetFactor()
9119: . rhs - location where the right hand side of the Schur complement system is stored
9120: - sol - location where the solution of the Schur complement system has to be returned
9122: Notes:
9123: The sizes of the vectors should match the size of the Schur complement
9125: Must be called after MatFactorSetSchurIS()
9127: Level: advanced
9129: References:
9131: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplement()
9132: @*/
9133: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
9134: {
9146: MatFactorFactorizeSchurComplement(F);
9147: switch (F->schur_status) {
9148: case MAT_FACTOR_SCHUR_FACTORED:
9149: MatSolveTranspose(F->schur,rhs,sol);
9150: break;
9151: case MAT_FACTOR_SCHUR_INVERTED:
9152: MatMultTranspose(F->schur,rhs,sol);
9153: break;
9154: default:
9155: SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9156: break;
9157: }
9158: return(0);
9159: }
9161: /*@
9162: MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step
9164: Logically Collective on Mat
9166: Input Parameters:
9167: + F - the factored matrix obtained by calling MatGetFactor()
9168: . rhs - location where the right hand side of the Schur complement system is stored
9169: - sol - location where the solution of the Schur complement system has to be returned
9171: Notes:
9172: The sizes of the vectors should match the size of the Schur complement
9174: Must be called after MatFactorSetSchurIS()
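   Example of Usage (a minimal sketch, assuming F has been factored after MatFactorSetSchurIS() and the vectors rhs and sol match the Schur complement size; error checking omitted):
.vb
      MatFactorSolveSchurComplement(F,rhs,sol);
.ve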
9176: Level: advanced
9178: References:
9180: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplementTranspose()
9181: @*/
9182: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
9183: {
9195: MatFactorFactorizeSchurComplement(F);
9196: switch (F->schur_status) {
9197: case MAT_FACTOR_SCHUR_FACTORED:
9198: MatSolve(F->schur,rhs,sol);
9199: break;
9200: case MAT_FACTOR_SCHUR_INVERTED:
9201: MatMult(F->schur,rhs,sol);
9202: break;
9203: default:
9204: SETERRQ1(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %D",F->schur_status);
9205: break;
9206: }
9207: return(0);
9208: }
9210: /*@
9211: MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step
9213: Logically Collective on Mat
9215: Input Parameter:
9216: . F - the factored matrix obtained by calling MatGetFactor()
9218: Notes:
9219: Must be called after MatFactorSetSchurIS().
9221: Call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() AFTER this call to actually compute the inverse and get access to it.
9223: Level: advanced
9225: References:
9227: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorCreateSchurComplement()
9228: @*/
9229: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9230: {
9236: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return(0);
9237: MatFactorFactorizeSchurComplement(F);
9238: MatFactorInvertSchurComplement_Private(F);
9239: F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9240: return(0);
9241: }
9243: /*@
9244: MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step
9246: Logically Collective on Mat
9248: Input Parameter:
9249: . F - the factored matrix obtained by calling MatGetFactor()
9251: Notes:
9252: Must be called after MatFactorSetSchurIS().
9254: Level: advanced
9256: References:
9258: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorInvertSchurComplement()
9259: @*/
9260: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9261: {
9267: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return(0);
9268: MatFactorFactorizeSchurComplement_Private(F);
9269: F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9270: return(0);
9271: }
9273: /*@
9274: MatPtAP - Creates the matrix product C = P^T * A * P
9276: Neighbor-wise Collective on Mat
9278: Input Parameters:
9279: + A - the matrix
9280: . P - the projection matrix
9281: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9282: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate
9283: if the result is a dense matrix this is irrelevant
9285: Output Parameters:
9286: . C - the product matrix
9288: Notes:
9289: C will be created and must be destroyed by the user with MatDestroy().
9291: This routine is currently only implemented for pairs of sequential dense matrices, AIJ matrices and classes
9292: which inherit from AIJ.
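   Example of Usage (a minimal sketch of forming a Galerkin coarse-grid operator, assuming the fine-grid matrix A and the interpolation matrix P already exist; error checking omitted):
.vb
      Mat Ac;

      MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ac);
      /* ... later, after the entries of A change but its nonzero pattern does not ... */
      MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&Ac);
      MatDestroy(&Ac);
.ve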
9294: Level: intermediate
9296: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9297: @*/
9298: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9299: {
9301: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9302: PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9303: PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9304: PetscBool sametype;
9309: MatCheckPreallocated(A,1);
9310: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9311: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9312: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9315: MatCheckPreallocated(P,2);
9316: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9317: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9319: if (A->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9320: if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9321: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9322: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9324: if (scall == MAT_REUSE_MATRIX) {
9328: if (!(*C)->ops->ptapnumeric) SETERRQ(PetscObjectComm((PetscObject)*C),PETSC_ERR_ARG_WRONGSTATE,"MatPtAPNumeric implementation is missing. You cannot use MAT_REUSE_MATRIX");
9329: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9330: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9331: (*(*C)->ops->ptapnumeric)(A,P,*C);
9332: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9333: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9334: return(0);
9335: }
9337: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9338: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9340: fA = A->ops->ptap;
9341: fP = P->ops->ptap;
9342: PetscStrcmp(((PetscObject)A)->type_name,((PetscObject)P)->type_name,&sametype);
9343: if (fP == fA && sametype) {
9344: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9345: ptap = fA;
9346: } else {
9347: /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9348: char ptapname[256];
9349: PetscStrncpy(ptapname,"MatPtAP_",sizeof(ptapname));
9350: PetscStrlcat(ptapname,((PetscObject)A)->type_name,sizeof(ptapname));
9351: PetscStrlcat(ptapname,"_",sizeof(ptapname));
9352: PetscStrlcat(ptapname,((PetscObject)P)->type_name,sizeof(ptapname));
9353: PetscStrlcat(ptapname,"_C",sizeof(ptapname)); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9354: PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9355: if (!ptap) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s (Misses composed function %s)",((PetscObject)A)->type_name,((PetscObject)P)->type_name,ptapname);
9356: }
9358: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9359: (*ptap)(A,P,scall,fill,C);
9360: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9361: if (A->symmetric_set && A->symmetric) {
9362: MatSetOption(*C,MAT_SYMMETRIC,PETSC_TRUE);
9363: }
9364: return(0);
9365: }
9367: /*@
9368: MatPtAPNumeric - Computes the matrix product C = P^T * A * P
9370: Neighbor-wise Collective on Mat
9372: Input Parameters:
9373: + A - the matrix
9374: - P - the projection matrix
9376: Output Parameters:
9377: . C - the product matrix
9379: Notes:
9380: C must have been created by calling MatPtAPSymbolic and must be destroyed by
9381: the user using MatDestroy().
9383: This routine is currently only implemented for pairs of AIJ matrices and classes
9384: which inherit from AIJ. C will be of type MATAIJ.
9386: Level: intermediate
9388: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9389: @*/
9390: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9391: {
9397: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9398: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9401: MatCheckPreallocated(P,2);
9402: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9403: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9406: MatCheckPreallocated(C,3);
9407: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9408: if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9409: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9410: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9411: if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9412: MatCheckPreallocated(A,1);
9414: if (!C->ops->ptapnumeric) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"MatPtAPNumeric implementation is missing. You should call MatPtAPSymbolic first");
9415: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9416: (*C->ops->ptapnumeric)(A,P,C);
9417: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9418: return(0);
9419: }
9421: /*@
9422: MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P
9424: Neighbor-wise Collective on Mat
9426: Input Parameters:
9427: + A - the matrix
9428: - P - the projection matrix
9430: Output Parameters:
9431: . C - the (i,j) structure of the product matrix
9433: Notes:
9434: C will be created and must be destroyed by the user with MatDestroy().
9436: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9437: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
9438: this (i,j) structure by calling MatPtAPNumeric().
9440: Level: intermediate
9442: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9443: @*/
9444: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9445: {
9451: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9452: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9453: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9456: MatCheckPreallocated(P,2);
9457: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9458: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9461: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9462: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9463: MatCheckPreallocated(A,1);
9465: if (!A->ops->ptapsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatType %s",((PetscObject)A)->type_name);
9466: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9467: (*A->ops->ptapsymbolic)(A,P,fill,C);
9468: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
9470: /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
9471: return(0);
9472: }
9474: /*@
9475: MatRARt - Creates the matrix product C = R * A * R^T
9477: Neighbor-wise Collective on Mat
9479: Input Parameters:
9480: + A - the matrix
9481: . R - the projection matrix
9482: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9483: - fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate
9484: if the result is a dense matrix this is irrelevant
9486: Output Parameters:
9487: . C - the product matrix
9489: Notes:
9490: C will be created and must be destroyed by the user with MatDestroy().
9492: This routine is currently only implemented for pairs of AIJ matrices and classes
9493: which inherit from AIJ. Due to PETSc sparse matrix block row distribution among processes,
9494: parallel MatRARt is implemented via explicit transpose of R, which could be very expensive.
9495: We recommend using MatPtAP().
9497: Level: intermediate
9499: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
9500: @*/
9501: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9502: {
9508: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9509: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9510: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9513: MatCheckPreallocated(R,2);
9514: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9515: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9517: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9519: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9520: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9521: MatCheckPreallocated(A,1);
9523: if (!A->ops->rart) {
9524: Mat Rt;
9525: MatTranspose(R,MAT_INITIAL_MATRIX,&Rt);
9526: MatMatMatMult(R,A,Rt,scall,fill,C);
9527: MatDestroy(&Rt);
9528: return(0);
9529: }
9530: PetscLogEventBegin(MAT_RARt,A,R,0,0);
9531: (*A->ops->rart)(A,R,scall,fill,C);
9532: PetscLogEventEnd(MAT_RARt,A,R,0,0);
9533: return(0);
9534: }
9536: /*@
9537: MatRARtNumeric - Computes the matrix product C = R * A * R^T
9539: Neighbor-wise Collective on Mat
9541: Input Parameters:
9542: + A - the matrix
9543: - R - the projection matrix
9545: Output Parameters:
9546: . C - the product matrix
9548: Notes:
9549: C must have been created by calling MatRARtSymbolic and must be destroyed by
9550: the user using MatDestroy().
9552: This routine is currently only implemented for pairs of AIJ matrices and classes
9553: which inherit from AIJ. C will be of type MATAIJ.
9555: Level: intermediate
9557: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
9558: @*/
9559: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
9560: {
9566: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9567: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9570: MatCheckPreallocated(R,2);
9571: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9572: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9575: MatCheckPreallocated(C,3);
9576: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9577: if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
9578: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9579: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9580: if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
9581: MatCheckPreallocated(A,1);
9583: PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
9584: (*A->ops->rartnumeric)(A,R,C);
9585: PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
9586: return(0);
9587: }
9589: /*@
9590: MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T
9592: Neighbor-wise Collective on Mat
9594: Input Parameters:
9595: + A - the matrix
9596: - R - the projection matrix
9598: Output Parameters:
9599: . C - the (i,j) structure of the product matrix
9601: Notes:
9602: C will be created and must be destroyed by the user with MatDestroy().
9604: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9605: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
9606: this (i,j) structure by calling MatRARtNumeric().
9608: Level: intermediate
9610: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
9611: @*/
9612: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
9613: {
9619: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9620: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9621: if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9624: MatCheckPreallocated(R,2);
9625: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9626: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9629: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9630: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9631: MatCheckPreallocated(A,1);
9632: PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
9633: (*A->ops->rartsymbolic)(A,R,fill,C);
9634: PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);
9636: MatSetBlockSizes(*C,PetscAbs(R->rmap->bs),PetscAbs(R->rmap->bs));
9637: return(0);
9638: }
9640: /*@
9641: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
9643: Neighbor-wise Collective on Mat
9645: Input Parameters:
9646: + A - the left matrix
9647: . B - the right matrix
9648: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9649: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate
9650: if the result is a dense matrix this is irrelevant
9652: Output Parameters:
9653: . C - the product matrix
9655: Notes:
9656: Unless scall is MAT_REUSE_MATRIX C will be created.
9658: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call and C was obtained from a previous
9659: call to this function with either MAT_INITIAL_MATRIX or MatMatMultSymbolic()
9661: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9662: actually needed.
9664: If you have many matrices with the same non-zero structure to multiply, you
9665: should either
9666: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9667: $ 2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
9668: In the special case where matrix B (and hence C) is dense, you can create the correctly sized matrix C yourself and then call this routine
9669: with MAT_REUSE_MATRIX, rather than first having MatMatMult() create it for you. You can NEVER do this if the matrix C is sparse.
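   Example of Usage (a minimal sketch of the reuse pattern described above, assuming A and B already exist and keep the same nonzero structure between iterations; i and nsteps are illustrative; error checking omitted):
.vb
      Mat C;

      MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
      for (i=0; i<nsteps; i++) {
        /* ... update the entries of A and/or B without changing their structure ... */
        MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
      }
      MatDestroy(&C);
.ve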
9671: Level: intermediate
9673: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP()
9674: @*/
9675: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9676: {
9678: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9679: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9680: PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9685: MatCheckPreallocated(A,1);
9686: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9687: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9690: MatCheckPreallocated(B,2);
9691: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9692: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9694: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9695: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9696: if (scall == MAT_REUSE_MATRIX) {
9699: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9700: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
9701: (*(*C)->ops->matmultnumeric)(A,B,*C);
9702: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
9703: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9704: return(0);
9705: }
9706: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9707: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9709: fA = A->ops->matmult;
9710: fB = B->ops->matmult;
9711: if (fB == fA) {
9712: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
9713: mult = fB;
9714: } else {
9715: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9716: char multname[256];
9717: PetscStrncpy(multname,"MatMatMult_",sizeof(multname));
9718: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9719: PetscStrlcat(multname,"_",sizeof(multname));
9720: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9721: PetscStrlcat(multname,"_C",sizeof(multname)); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
9722: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9723: if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9724: }
9725: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9726: (*mult)(A,B,scall,fill,C);
9727: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9728: return(0);
9729: }
9731: /*@
9732: MatMatMultSymbolic - Performs construction, preallocation, and computes the ij structure
9733: of the matrix-matrix product C=A*B. Call this routine before calling MatMatMultNumeric().
9735: Neighbor-wise Collective on Mat
9737: Input Parameters:
9738: + A - the left matrix
9739: . B - the right matrix
9740: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate,
9741: if C is a dense matrix this is irrelevant
9743: Output Parameters:
9744: . C - the product matrix
9746: Notes:
9747: Unless scall is MAT_REUSE_MATRIX C will be created.
9749: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9750: actually needed.
9752: This routine is currently implemented for
9753: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
9754: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9755: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
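   Example of Usage (a minimal sketch of the symbolic/numeric split, assuming A and B already exist and keep their nonzero structure; error checking omitted):
.vb
      Mat C;

      MatMatMultSymbolic(A,B,PETSC_DEFAULT,&C);
      MatMatMultNumeric(A,B,C);
      /* ... change the numerical values of A or B, keeping their structure ... */
      MatMatMultNumeric(A,B,C);
      MatDestroy(&C);
.ve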
9757: Level: intermediate
9759: Developers Note: There are ways to estimate the number of nonzeros in the resulting product, see for example, http://arxiv.org/abs/1006.4173
9760: We should incorporate them into PETSc.
9762: .seealso: MatMatMult(), MatMatMultNumeric()
9763: @*/
9764: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
9765: {
9767: PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
9768: PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
9769: PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;
9774: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9775: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9779: MatCheckPreallocated(B,2);
9780: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9781: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9784: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9785: if (fill == PETSC_DEFAULT) fill = 2.0;
9786: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9787: MatCheckPreallocated(A,1);
9789: Asymbolic = A->ops->matmultsymbolic;
9790: Bsymbolic = B->ops->matmultsymbolic;
9791: if (Asymbolic == Bsymbolic) {
9792: if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
9793: symbolic = Bsymbolic;
9794: } else { /* dispatch based on the type of A and B */
9795: char symbolicname[256];
9796: PetscStrncpy(symbolicname,"MatMatMultSymbolic_",sizeof(symbolicname));
9797: PetscStrlcat(symbolicname,((PetscObject)A)->type_name,sizeof(symbolicname));
9798: PetscStrlcat(symbolicname,"_",sizeof(symbolicname));
9799: PetscStrlcat(symbolicname,((PetscObject)B)->type_name,sizeof(symbolicname));
9800: PetscStrlcat(symbolicname,"_C",sizeof(symbolicname));
9801: PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
9802: if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9803: }
9804: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
9805: (*symbolic)(A,B,fill,C);
9806: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
9807: return(0);
9808: }
9810: /*@
9811: MatMatMultNumeric - Performs the numeric matrix-matrix product.
9812: Call this routine after first calling MatMatMultSymbolic().
9814: Neighbor-wise Collective on Mat
9816: Input Parameters:
9817: + A - the left matrix
9818: - B - the right matrix
9820: Output Parameters:
9821: . C - the product matrix, which was created by from MatMatMultSymbolic() or a call to MatMatMult().
9823: Notes:
9824: C must have been created with MatMatMultSymbolic().
9826: This routine is currently implemented for
9827: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
9828: - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9829: - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.
9831: Level: intermediate
9833: .seealso: MatMatMult(), MatMatMultSymbolic()
9834: @*/
9835: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
9836: {
9840: MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
9841: return(0);
9842: }
9844: /*@
9845: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
9847: Neighbor-wise Collective on Mat
9849: Input Parameters:
9850: + A - the left matrix
9851: . B - the right matrix
9852: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9853: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9855: Output Parameters:
9856: . C - the product matrix
9858: Notes:
9859: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9861: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9863: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9864: actually needed.
9866: This routine is currently only implemented for pairs of SeqAIJ matrices and for the SeqDense class.
9868: Level: intermediate
9870: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult() MatPtAP()
9871: @*/
9872: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9873: {
9875: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9876: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9881: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9882: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9883: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9886: MatCheckPreallocated(B,2);
9887: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9888: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9890: if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
9891: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9892: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9893: MatCheckPreallocated(A,1);
9895: fA = A->ops->mattransposemult;
9896: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
9897: fB = B->ops->mattransposemult;
9898: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
9899: if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9901: PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
9902: if (scall == MAT_INITIAL_MATRIX) {
9903: PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
9904: (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
9905: PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
9906: }
9907: PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
9908: (*A->ops->mattransposemultnumeric)(A,B,*C);
9909: PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
9910: PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
9911: return(0);
9912: }
9914: /*@
9915: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
9917: Neighbor-wise Collective on Mat
9919: Input Parameters:
9920: + A - the left matrix
9921: . B - the right matrix
9922: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9923: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9925: Output Parameters:
9926: . C - the product matrix
9928: Notes:
9929: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9931: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9933: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9934: actually needed.
9936: This routine is currently implemented for pairs of AIJ matrices and pairs of SeqDense matrices and classes
9937: which inherit from SeqAIJ. C will be of same type as the input matrices.
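   Example of Usage (a minimal sketch forming C = A^T*B, assuming A and B already exist with the same row layout; error checking omitted):
.vb
      Mat C;

      MatTransposeMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
      MatDestroy(&C);
.ve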
9939: Level: intermediate
9941: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
9942: @*/
9943: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9944: {
9946: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9947: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9948: PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;
9953: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
9954: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9955: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9958: MatCheckPreallocated(B,2);
9959: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9960: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9962: if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
9963: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9964: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9965: MatCheckPreallocated(A,1);
9967: fA = A->ops->transposematmult;
9968: fB = B->ops->transposematmult;
9969: if (fB==fA) {
9970: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9971: transposematmult = fA;
9972: } else {
9973: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9974: char multname[256];
9975: PetscStrncpy(multname,"MatTransposeMatMult_",sizeof(multname));
9976: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
9977: PetscStrlcat(multname,"_",sizeof(multname));
9978: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
9979: PetscStrlcat(multname,"_C",sizeof(multname)); /* e.g., multname = "MatTransposeMatMult_seqaij_seqdense_C" */
9980: PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9981: if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9982: }
9983: PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9984: (*transposematmult)(A,B,scall,fill,C);
9985: PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9986: return(0);
9987: }
9989: /*@
9990: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9992: Neighbor-wise Collective on Mat
9994: Input Parameters:
9995: + A - the left matrix
9996: . B - the middle matrix
9997: . C - the right matrix
9998: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9999: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B) + nnz(C)), use PETSC_DEFAULT if you do not have a good estimate;
10000: if the result is a dense matrix this is irrelevant
10002: Output Parameters:
10003: . D - the product matrix
10005: Notes:
10006: Unless scall is MAT_REUSE_MATRIX D will be created.
10008: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
10010: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
10011: actually needed.
10013: If you have many matrices with the same nonzero structure to multiply, you
10014: should use MAT_REUSE_MATRIX in all calls but the first.
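   Example of Usage (a minimal sketch; A, B and C are assumed to be assembled matrices of compatible sizes
   and error checking with CHKERRQ() is omitted):
.vb
   Mat D;
   MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);   /* D = A*B*C */
   /* ... change numerical values of A, B or C, keeping their nonzero patterns ... */
   MatMatMatMult(A,B,C,MAT_REUSE_MATRIX,PETSC_DEFAULT,&D);
   MatDestroy(&D);
.ve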
10016: Level: intermediate
10018: .seealso: MatMatMult(), MatPtAP()
10019: @*/
10020: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
10021: {
10023: PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
10024: PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
10025: PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
10026: PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
10031: MatCheckPreallocated(A,1);
10032: if (scall == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Inplace product not supported");
10033: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10034: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10037: MatCheckPreallocated(B,2);
10038: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10039: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10042: MatCheckPreallocated(C,3);
10043: if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10044: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10045: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
10046: if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
10047: if (scall == MAT_REUSE_MATRIX) {
10050: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
10051: (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
10052: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
10053: return(0);
10054: }
10055: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
10056: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
10058: fA = A->ops->matmatmult;
10059: fB = B->ops->matmatmult;
10060: fC = C->ops->matmatmult;
10061: if (fA == fB && fA == fC) {
10062: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
10063: mult = fA;
10064: } else {
10065: /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
10066: char multname[256];
10067: PetscStrncpy(multname,"MatMatMatMult_",sizeof(multname));
10068: PetscStrlcat(multname,((PetscObject)A)->type_name,sizeof(multname));
10069: PetscStrlcat(multname,"_",sizeof(multname));
10070: PetscStrlcat(multname,((PetscObject)B)->type_name,sizeof(multname));
10071: PetscStrlcat(multname,"_",sizeof(multname));
10072: PetscStrlcat(multname,((PetscObject)C)->type_name,sizeof(multname));
10073: PetscStrlcat(multname,"_C",sizeof(multname));
10074: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
10075: if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
10076: }
10077: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
10078: (*mult)(A,B,C,scall,fill,D);
10079: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
10080: return(0);
10081: }
10083: /*@
10084: MatCreateRedundantMatrix - Creates redundant matrices and puts them into processors of subcommunicators.
10086: Collective on Mat
10088: Input Parameters:
10089: + mat - the matrix
10090: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
10091: . subcomm - MPI communicator split from the communicator in which mat resides (or MPI_COMM_NULL if nsubcomm is used)
10092: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10094: Output Parameter:
10095: . matredundant - redundant matrix
10097: Notes:
10098: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
10099: original matrix has not changed since the last call to MatCreateRedundantMatrix().
10101: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
10102: calling it.
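   Example of Usage (a minimal sketch; the value of nsubcomm is illustrative and error checking with
   CHKERRQ() is omitted):
.vb
   Mat      matredundant;
   PetscInt nsubcomm = 2;       /* number of redundant copies */
   MatCreateRedundantMatrix(mat,nsubcomm,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&matredundant);
   /* ... each subcommunicator now owns a full copy of mat ... */
   MatDestroy(&matredundant);
.ve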
10104: Level: advanced
10106: Concepts: subcommunicator
10107: Concepts: duplicate matrix
10109: .seealso: MatDestroy()
10110: @*/
10111: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
10112: {
10114: MPI_Comm comm;
10115: PetscMPIInt size;
10116: PetscInt mloc_sub,nloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
10117: Mat_Redundant *redund=NULL;
10118: PetscSubcomm psubcomm=NULL;
10119: MPI_Comm subcomm_in=subcomm;
10120: Mat *matseq;
10121: IS isrow,iscol;
10122: PetscBool newsubcomm=PETSC_FALSE;
10126: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
10129: }
10131: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
10132: if (size == 1 || nsubcomm == 1) {
10133: if (reuse == MAT_INITIAL_MATRIX) {
10134: MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
10135: } else {
10136: if (*matredundant == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10137: MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
10138: }
10139: return(0);
10140: }
10142: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10143: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10144: MatCheckPreallocated(mat,1);
10146: PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
10147: if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
10148: /* create psubcomm, then get subcomm */
10149: PetscObjectGetComm((PetscObject)mat,&comm);
10150: MPI_Comm_size(comm,&size);
10151: if (nsubcomm < 1 || nsubcomm > size) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"nsubcomm must be between 1 and %D",size);
10153: PetscSubcommCreate(comm,&psubcomm);
10154: PetscSubcommSetNumber(psubcomm,nsubcomm);
10155: PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
10156: PetscSubcommSetFromOptions(psubcomm);
10157: PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
10158: newsubcomm = PETSC_TRUE;
10159: PetscSubcommDestroy(&psubcomm);
10160: }
10162: /* get isrow, iscol and a local sequential matrix matseq[0] */
10163: if (reuse == MAT_INITIAL_MATRIX) {
10164: mloc_sub = PETSC_DECIDE;
10165: nloc_sub = PETSC_DECIDE;
10166: if (bs < 1) {
10167: PetscSplitOwnership(subcomm,&mloc_sub,&M);
10168: PetscSplitOwnership(subcomm,&nloc_sub,&N);
10169: } else {
10170: PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
10171: PetscSplitOwnershipBlock(subcomm,bs,&nloc_sub,&N);
10172: }
10173: MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
10174: rstart = rend - mloc_sub;
10175: ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
10176: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
10177: } else { /* reuse == MAT_REUSE_MATRIX */
10178: if (*matredundant == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10179: /* retrieve subcomm */
10180: PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
10181: redund = (*matredundant)->redundant;
10182: isrow = redund->isrow;
10183: iscol = redund->iscol;
10184: matseq = redund->matseq;
10185: }
10186: MatCreateSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);
10188: /* get matredundant over subcomm */
10189: if (reuse == MAT_INITIAL_MATRIX) {
10190: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],nloc_sub,reuse,matredundant);
10192: /* create a supporting struct and attach it to C for reuse */
10193: PetscNewLog(*matredundant,&redund);
10194: (*matredundant)->redundant = redund;
10195: redund->isrow = isrow;
10196: redund->iscol = iscol;
10197: redund->matseq = matseq;
10198: if (newsubcomm) {
10199: redund->subcomm = subcomm;
10200: } else {
10201: redund->subcomm = MPI_COMM_NULL;
10202: }
10203: } else {
10204: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
10205: }
10206: PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
10207: return(0);
10208: }
10210: /*@C
10211: MatGetMultiProcBlock - Creates multiple 'parallel submatrices' from a given 'mat' object, as used,
10212: for example, by the block Jacobi preconditioner (bjacobi). Each submatrix can span multiple processes.
10214: Collective on Mat
10216: Input Parameters:
10217: + mat - the matrix
10218: . subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
10219: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10221: Output Parameter:
10222: . subMat - parallel submatrices, each spanning a given subComm
10224: Notes:
10225: The submatrix partition across processes is dictated by 'subComm', a
10226: communicator obtained by MPI_Comm_split(comm). The MPI_Comm_split()
10227: is not restricted to be grouped with consecutive original ranks.
10229: Due to the MPI_Comm_split() usage, the parallel layout of the submatrices
10230: maps directly to the layout of the original matrix [with respect to the local
10231: row,col partitioning]. So the original 'DiagonalMat' naturally maps
10232: into the 'DiagonalMat' of the subMat, hence it is used directly from
10233: the subMat. However the offDiagMat loses some columns - and these are
10234: reconstructed with MatSetValues()
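   Example of Usage (a minimal sketch; here consecutive pairs of ranks are grouped into subcommunicators,
   an illustrative choice; error checking with CHKERRQ() is omitted):
.vb
   PetscMPIInt rank;
   MPI_Comm    subComm;
   Mat         subMat;
   MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
   MPI_Comm_split(PetscObjectComm((PetscObject)mat),rank/2,rank,&subComm);
   MatGetMultiProcBlock(mat,subComm,MAT_INITIAL_MATRIX,&subMat);
.ve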
10236: Level: advanced
10238: Concepts: subcommunicator
10239: Concepts: submatrices
10241: .seealso: MatCreateSubMatrices()
10242: @*/
10243: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
10244: {
10246: PetscMPIInt commsize,subCommSize;
10249: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
10250: MPI_Comm_size(subComm,&subCommSize);
10251: if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);
10253: if (scall == MAT_REUSE_MATRIX && *subMat == mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10254: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
10255: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
10256: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
10257: return(0);
10258: }
10260: /*@
10261: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
10263: Not Collective
10265: Input Parameters:
10266: + mat - matrix to extract local submatrix from
10267: . isrow - local row indices for submatrix
10268: - iscol - local column indices for submatrix
10270: Output Parameter:
10271: . submat - the submatrix
10273: Level: intermediate
10275: Notes:
10276: The submat should be returned with MatRestoreLocalSubMatrix().
10278: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
10279: the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.
10281: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
10282: MatSetValuesBlockedLocal() will also be implemented.
10284: The mat must have had an ISLocalToGlobalMapping provided to it with MatSetLocalToGlobalMapping(). Note that
10285: matrices obtained with DMCreateMatrix() generally already have the local to global mapping provided.
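   Example of Usage (a minimal sketch; isrow and iscol are assumed to be existing local index sets, and the
   row, col and value shown are illustrative; error checking with CHKERRQ() is omitted):
.vb
   Mat         sub;
   PetscInt    row = 0,col = 0;      /* indices in the local numbering of the submatrix */
   PetscScalar v   = 1.0;
   MatGetLocalSubMatrix(mat,isrow,iscol,&sub);
   MatSetValuesLocal(sub,1,&row,1,&col,&v,ADD_VALUES);
   MatRestoreLocalSubMatrix(mat,isrow,iscol,&sub);
.ve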
10287: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef(), MatSetLocalToGlobalMapping()
10288: @*/
10289: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10290: {
10299: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must have local to global mapping provided before this call");
10301: if (mat->ops->getlocalsubmatrix) {
10302: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
10303: } else {
10304: MatCreateLocalRef(mat,isrow,iscol,submat);
10305: }
10306: return(0);
10307: }
10309: /*@
10310: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
10312: Not Collective
10314: Input Parameters:
10315: + mat - matrix to extract local submatrix from
10316: . isrow - local row indices for submatrix
10317: . iscol - local column indices for submatrix
10318: - submat - the submatrix
10320: Level: intermediate
10322: .seealso: MatGetLocalSubMatrix()
10323: @*/
10324: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10325: {
10334: if (*submat) {
10336: }
10338: if (mat->ops->restorelocalsubmatrix) {
10339: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
10340: } else {
10341: MatDestroy(submat);
10342: }
10343: *submat = NULL;
10344: return(0);
10345: }
10347: /* --------------------------------------------------------*/
10348: /*@
10349: MatFindZeroDiagonals - Finds all the rows of a matrix that have a zero or missing diagonal entry
10351: Collective on Mat
10353: Input Parameter:
10354: . mat - the matrix
10356: Output Parameter:
10357: . is - if any rows have zero diagonals this contains the list of them
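   Example of Usage (a minimal sketch; error checking with CHKERRQ() is omitted):
.vb
   IS zerodiag;
   MatFindZeroDiagonals(mat,&zerodiag);
   ISView(zerodiag,PETSC_VIEWER_STDOUT_WORLD);
   ISDestroy(&zerodiag);
.ve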
10359: Level: developer
10361: Concepts: matrix^zero diagonal entries
10363: .seealso: MatFindOffBlockDiagonalEntries(), MatGetDiagonal()
10364: @*/
10365: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
10366: {
10372: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10373: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10375: if (!mat->ops->findzerodiagonals) {
10376: Vec diag;
10377: const PetscScalar *a;
10378: PetscInt *rows;
10379: PetscInt rStart, rEnd, r, nrow = 0;
10381: MatCreateVecs(mat, &diag, NULL);
10382: MatGetDiagonal(mat, diag);
10383: MatGetOwnershipRange(mat, &rStart, &rEnd);
10384: VecGetArrayRead(diag, &a);
10385: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
10386: PetscMalloc1(nrow, &rows);
10387: nrow = 0;
10388: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
10389: VecRestoreArrayRead(diag, &a);
10390: VecDestroy(&diag);
10391: ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
10392: } else {
10393: (*mat->ops->findzerodiagonals)(mat, is);
10394: }
10395: return(0);
10396: }
10398: /*@
10399: MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)
10401: Collective on Mat
10403: Input Parameter:
10404: . mat - the matrix
10406: Output Parameter:
10407: . is - contains the list of rows with off block diagonal entries
10409: Level: developer
10411: Concepts: matrix^off block diagonal entries
10413: .seealso: MatFindZeroDiagonals(), MatGetBlockSize()
10414: @*/
10415: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
10416: {
10422: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10423: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10425: if (!mat->ops->findoffblockdiagonalentries) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find off block diagonal entries defined");
10426: (*mat->ops->findoffblockdiagonalentries)(mat,is);
10427: return(0);
10428: }
10430: /*@C
10431: MatInvertBlockDiagonal - Inverts the block diagonal entries.
10433: Collective on Mat
10435: Input Parameters:
10436: . mat - the matrix
10438: Output Parameters:
10439: . values - the block inverses in column major order (FORTRAN-like)
10441: Note:
10442: This routine is not available from Fortran.
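   Example of Usage (a minimal sketch; error checking with CHKERRQ() is omitted):
.vb
   const PetscScalar *values;
   PetscInt          bs;
   MatGetBlockSize(mat,&bs);
   MatInvertBlockDiagonal(mat,&values);
   /* values[i*bs*bs .. (i+1)*bs*bs-1] holds the inverse of the i-th local diagonal block,
      stored in column major order */
.ve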
10444: Level: advanced
10446: .seealso: MatInvertBlockDiagonalMat()
10447: @*/
10448: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
10449: {
10454: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10455: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10456: if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
10457: (*mat->ops->invertblockdiagonal)(mat,values);
10458: return(0);
10459: }
10461: /*@C
10462: MatInvertVariableBlockDiagonal - Inverts the block diagonal entries, where the diagonal blocks may have different sizes.
10464: Collective on Mat
10466: Input Parameters:
10467: + mat - the matrix
10468: . nblocks - the number of blocks
10469: - bsizes - the size of each block
10471: Output Parameters:
10472: . values - the block inverses in column major order (FORTRAN-like)
10474: Note:
10475: This routine is not available from Fortran.
10477: Level: advanced
10479: .seealso: MatInvertBlockDiagonal()
10480: @*/
10481: PetscErrorCode MatInvertVariableBlockDiagonal(Mat mat,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *values)
10482: {
10487: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10488: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10489: if (!mat->ops->invertvariableblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
10490: (*mat->ops->invertvariableblockdiagonal)(mat,nblocks,bsizes,values);
10491: return(0);
10492: }
10494: /*@
10495: MatInvertBlockDiagonalMat - Sets the matrix C to be the inverted block diagonal of the matrix A
10497: Collective on Mat
10499: Input Parameters:
10500: . A - the matrix
10502: Output Parameters:
10503: . C - matrix with the inverted block diagonal of A. This matrix must already be created (with MatCreate()) by the caller and may optionally have its type set.
10505: Notes: the block size of A is used to determine the blocks on the diagonal of C
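   Example of Usage (a minimal sketch; setting the type of C is optional per the note on C above, and error
   checking with CHKERRQ() is omitted):
.vb
   Mat C;
   MatCreate(PetscObjectComm((PetscObject)A),&C);
   MatSetType(C,MATAIJ);
   MatInvertBlockDiagonalMat(A,C);
   /* C now contains the inverse of each diagonal block of A */
   MatDestroy(&C);
.ve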
10507: Level: advanced
10509: .seealso: MatInvertBlockDiagonal()
10510: @*/
10511: PetscErrorCode MatInvertBlockDiagonalMat(Mat A,Mat C)
10512: {
10513: PetscErrorCode ierr;
10514: const PetscScalar *vals;
10515: PetscInt *dnnz;
10516: PetscInt M,N,m,n,rstart,rend,bs,i,j;
10519: MatInvertBlockDiagonal(A,&vals);
10520: MatGetBlockSize(A,&bs);
10521: MatGetSize(A,&M,&N);
10522: MatGetLocalSize(A,&m,&n);
10523: MatSetSizes(C,m,n,M,N);
10524: MatSetBlockSize(C,bs);
10525: PetscMalloc1(m/bs,&dnnz);
10526: for (j = 0; j < m/bs; j++) {
10527: dnnz[j] = 1;
10528: }
10529: MatXAIJSetPreallocation(C,bs,dnnz,NULL,NULL,NULL);
10530: PetscFree(dnnz);
10531: MatGetOwnershipRange(C,&rstart,&rend);
10532: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_FALSE);
10533: for (i = rstart/bs; i < rend/bs; i++) {
10534: MatSetValuesBlocked(C,1,&i,1,&i,&vals[(i-rstart/bs)*bs*bs],INSERT_VALUES);
10535: }
10536: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
10537: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
10538: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_TRUE);
10539: return(0);
10540: }
10542: /*@C
10543: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
10544: via MatTransposeColoringCreate().
10546: Collective on MatTransposeColoring
10548: Input Parameter:
10549: . c - coloring context
10551: Level: intermediate
10553: .seealso: MatTransposeColoringCreate()
10554: @*/
10555: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
10556: {
10557: PetscErrorCode ierr;
10558: MatTransposeColoring matcolor=*c;
10561: if (!matcolor) return(0);
10562: if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}
10564: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
10565: PetscFree(matcolor->rows);
10566: PetscFree(matcolor->den2sp);
10567: PetscFree(matcolor->colorforcol);
10568: PetscFree(matcolor->columns);
10569: if (matcolor->brows>0) {
10570: PetscFree(matcolor->lstart);
10571: }
10572: PetscHeaderDestroy(c);
10573: return(0);
10574: }
10576: /*@C
10577: MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
10578: a MatTransposeColoring context has been created, computes a dense B^T by applying
10579: the MatTransposeColoring to the sparse B.
10581: Collective on MatTransposeColoring
10583: Input Parameters:
10584: + B - sparse matrix B
10585: . Btdense - symbolic dense matrix B^T
10586: - coloring - coloring context created with MatTransposeColoringCreate()
10588: Output Parameter:
10589: . Btdense - dense matrix B^T
10591: Level: advanced
10593: Notes:
10594: These are used internally for some implementations of MatRARt()
10596: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplyDenToSp()
10598: .keywords: coloring
10599: @*/
10600: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
10601: {
10609: if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
10610: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
10611: return(0);
10612: }
10614: /*@C
10615: MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
10616: a MatTransposeColoring context has been created and a dense matrix Cden=A*Btdense,
10617: in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse matrix
10618: Csp from Cden.
10620: Collective on MatTransposeColoring
10622: Input Parameters:
10623: + coloring - coloring context created with MatTransposeColoringCreate()
10624: - Cden - matrix product of a sparse matrix and a dense matrix Btdense
10626: Output Parameter:
10627: . Csp - sparse matrix
10629: Level: advanced
10631: Notes:
10632: These are used internally for some implementations of MatRARt()
10634: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
10636: .keywords: coloring
10637: @*/
10638: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
10639: {
10647: if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
10648: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
10649: return(0);
10650: }
10652: /*@C
10653: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
10655: Collective on Mat
10657: Input Parameters:
10658: + mat - the matrix product C
10659: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
10661: Output Parameter:
10662: . color - the new coloring context
10664: Level: intermediate
10666: .seealso: MatTransposeColoringDestroy(), MatTransColoringApplySpToDen(),
10667: MatTransColoringApplyDenToSp()
10668: @*/
10669: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
10670: {
10671: MatTransposeColoring c;
10672: MPI_Comm comm;
10673: PetscErrorCode ierr;
10676: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
10677: PetscObjectGetComm((PetscObject)mat,&comm);
10678: PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);
10680: c->ctype = iscoloring->ctype;
10681: if (mat->ops->transposecoloringcreate) {
10682: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
10683: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");
10685: *color = c;
10686: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10687: return(0);
10688: }
10690: /*@
10691: MatGetNonzeroState - Returns a 64 bit integer representing the current state of nonzeros in the matrix. If the
10692: matrix has had no new nonzero locations added since the previous call, then the value will be the
10693: same; otherwise it will be larger.
10695: Not Collective
10697: Input Parameter:
10698: . mat - the matrix
10700: Output Parameter:
10701: . state - the current state
10703: Notes:
10704: You can only compare states from two different calls made on the SAME matrix; you cannot compare states from
10705: different matrices.
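   Example of Usage (a minimal sketch; error checking with CHKERRQ() is omitted):
.vb
   PetscObjectState state0,state1;
   MatGetNonzeroState(mat,&state0);
   /* ... insert values and reassemble mat ... */
   MatGetNonzeroState(mat,&state1);
   if (state1 > state0) {
     /* new nonzero locations were introduced since the first call */
   }
.ve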
10707: Level: intermediate
10709: @*/
10710: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10711: {
10714: *state = mat->nonzerostate;
10715: return(0);
10716: }
10718: /*@
10719: MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10720: matrices from each processor
10722: Collective on MPI_Comm
10724: Input Parameters:
10725: + comm - the communicator the parallel matrix will live on
10726: . seqmat - the input sequential matrix (one per process)
10727: . n - number of local columns (or PETSC_DECIDE)
10728: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10730: Output Parameter:
10731: . mpimat - the parallel matrix generated
10733: Level: advanced
10735: Notes:
10736: The number of columns of the matrix in EACH processor MUST be the same.
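   Example of Usage (a minimal sketch; seqmat is assumed to be an already assembled sequential matrix on each
   rank; error checking with CHKERRQ() is omitted):
.vb
   Mat mpimat;
   MatCreateMPIMatConcatenateSeqMat(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
.ve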
10738: @*/
10739: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10740: {
10744: if (!seqmat->ops->creatempimatconcatenateseqmat) SETERRQ1(PetscObjectComm((PetscObject)seqmat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)seqmat)->type_name);
10745: if (reuse == MAT_REUSE_MATRIX && seqmat == *mpimat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"MAT_REUSE_MATRIX means reuse the matrix passed in as the final argument, not the original matrix");
10747: PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10748: (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10749: PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10750: return(0);
10751: }
10753: /*@
10754: MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10755: ranks' ownership ranges.
10757: Collective on A
10759: Input Parameters:
10760: + A - the matrix to create subdomains from
10761: - N - requested number of subdomains
10764: Output Parameters:
10765: + n - number of subdomains resulting on this rank
10766: - iss - IS list with indices of subdomains on this rank
10768: Level: advanced
10770: Notes:
10771: The number of subdomains must be greater than 0 and smaller than the communicator size.
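   Example of Usage (a minimal sketch; based on the current implementation the caller destroys the returned
   index sets and frees the array; error checking with CHKERRQ() is omitted):
.vb
   PetscInt i,n,N = 2;          /* request two subdomains */
   IS       *iss;
   MatSubdomainsCreateCoalesce(A,N,&n,&iss);
   /* ... use the n index sets owned by this rank ... */
   for (i = 0; i < n; i++) {ISDestroy(&iss[i]);}
   PetscFree(iss);
.ve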
10772: @*/
10773: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10774: {
10775: MPI_Comm comm,subcomm;
10776: PetscMPIInt size,rank,color;
10777: PetscInt rstart,rend,k;
10778: PetscErrorCode ierr;
10781: PetscObjectGetComm((PetscObject)A,&comm);
10782: MPI_Comm_size(comm,&size);
10783: MPI_Comm_rank(comm,&rank);
10784: if (N < 1 || N >= (PetscInt)size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and < %D, got N = %D",size,N);
10785: *n = 1;
10786: k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10787: color = rank/k;
10788: MPI_Comm_split(comm,color,rank,&subcomm);
10789: PetscMalloc1(1,iss);
10790: MatGetOwnershipRange(A,&rstart,&rend);
10791: ISCreateStride(subcomm,rend-rstart,rstart,1,iss[0]);
10792: MPI_Comm_free(&subcomm);
10793: return(0);
10794: }
10796: /*@
10797: MatGalerkin - Constructs the coarse grid problem via Galerkin projection.
10799: If the interpolation and restriction operators are the same, uses MatPtAP().
10800: If they are not the same, uses MatMatMatMult().
10802: Once the coarse grid problem is constructed, it is corrected for interpolation operators
10803: that are not of full rank, which can legitimately happen in the case of non-nested
10804: geometric multigrid.
10806: Input Parameters:
10807: + restrct - restriction operator
10808: . dA - fine grid matrix
10809: . interpolate - interpolation operator
10810: . reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10811: - fill - expected fill, use PETSC_DEFAULT if you do not have a good estimate
10813: Output Parameters:
10814: . A - the Galerkin coarse matrix
10816: Options Database Key:
10817: . -pc_mg_galerkin <both,pmat,mat,none>
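   Example of Usage (a minimal sketch; R, Af and P denote illustrative restriction, fine-grid and
   interpolation matrices; error checking with CHKERRQ() is omitted):
.vb
   Mat Ac;
   MatGalerkin(R,Af,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&Ac);   /* coarse operator via Galerkin projection */
   MatDestroy(&Ac);
.ve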
10819: Level: developer
10821: .keywords: MG, multigrid, Galerkin
10823: .seealso: MatPtAP(), MatMatMatMult()
10824: @*/
10825: PetscErrorCode MatGalerkin(Mat restrct, Mat dA, Mat interpolate, MatReuse reuse, PetscReal fill, Mat *A)
10826: {
10828: IS zerorows;
10829: Vec diag;
10832: if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)dA),PETSC_ERR_SUP,"Inplace product not supported");
10833: /* Construct the coarse grid matrix */
10834: if (interpolate == restrct) {
10835: MatPtAP(dA,interpolate,reuse,fill,A);
10836: } else {
10837: MatMatMatMult(restrct,dA,interpolate,reuse,fill,A);
10838: }
10840: /* If the interpolation matrix is not of full rank, A will have zero rows.
10841: This can legitimately happen in the case of non-nested geometric multigrid.
10842: In that event, we set the rows of the matrix to the rows of the identity,
10843: ignoring the equations (as the RHS will also be zero). */
10845: MatFindZeroRows(*A, &zerorows);
10847: if (zerorows != NULL) { /* if there are any zero rows */
10848: MatCreateVecs(*A, &diag, NULL);
10849: MatGetDiagonal(*A, diag);
10850: VecISSet(diag, zerorows, 1.0);
10851: MatDiagonalSet(*A, diag, INSERT_VALUES);
10852: VecDestroy(&diag);
10853: ISDestroy(&zerorows);
10854: }
10855: return(0);
10856: }
10858: /*@C
10859: MatSetOperation - Allows user to set a matrix operation for any matrix type
10861: Logically Collective on Mat
10863: Input Parameters:
10864: + mat - the matrix
10865: . op - the name of the operation
10866: - f - the function that provides the operation
10868: Level: developer
10870: Usage:
10871: $ extern PetscErrorCode usermult(Mat,Vec,Vec);
10872: $ MatCreateXXX(comm,...&A);
10873: $ MatSetOperation(A,MATOP_MULT,(void(*)(void))usermult);
10875: Notes:
10876: See the file include/petscmat.h for a complete list of matrix
10877: operations, which all have the form MATOP_<OPERATION>, where
10878: <OPERATION> is the name (in all capital letters) of the
10879: user interface routine (e.g., MatMult() -> MATOP_MULT).
10881: All user-provided functions (except for MATOP_DESTROY) should have the same calling
10882: sequence as the usual matrix interface routines, since they
10883: are intended to be accessed via the usual matrix interface
10884: routines, e.g.,
10885: $ MatMult(Mat,Vec,Vec) -> usermult(Mat,Vec,Vec)
10887: In particular each function MUST return an error code of 0 on success and
10888: nonzero on failure.
10890: This routine is distinct from MatShellSetOperation() in that it can be called on any matrix type.
10892: .keywords: matrix, set, operation
10894: .seealso: MatGetOperation(), MatCreateShell(), MatShellSetContext(), MatShellSetOperation()
10895: @*/
10896: PetscErrorCode MatSetOperation(Mat mat,MatOperation op,void (*f)(void))
10897: {
10900: if (op == MATOP_VIEW && !mat->ops->viewnative && f != (void (*)(void))(mat->ops->view)) {
10901: mat->ops->viewnative = mat->ops->view;
10902: }
10903: (((void(**)(void))mat->ops)[op]) = f;
10904: return(0);
10905: }
10907: /*@C
10908: MatGetOperation - Gets a matrix operation for any matrix type.
10910: Not Collective
10912: Input Parameters:
10913: + mat - the matrix
10914: - op - the name of the operation
10916: Output Parameter:
10917: . f - the function that provides the operation
10919: Level: developer
10921: Usage:
10922: $ PetscErrorCode (*usermult)(Mat,Vec,Vec);
10923: $ MatGetOperation(A,MATOP_MULT,(void(**)(void))&usermult);
10925: Notes:
10926: See the file include/petscmat.h for a complete list of matrix
10927: operations, which all have the form MATOP_<OPERATION>, where
10928: <OPERATION> is the name (in all capital letters) of the
10929: user interface routine (e.g., MatMult() -> MATOP_MULT).
10931: This routine is distinct from MatShellGetOperation() in that it can be called on any matrix type.
10933: .keywords: matrix, get, operation
10935: .seealso: MatSetOperation(), MatCreateShell(), MatShellGetContext(), MatShellGetOperation()
10936: @*/
10937: PetscErrorCode MatGetOperation(Mat mat,MatOperation op,void(**f)(void))
10938: {
10941: *f = (((void (**)(void))mat->ops)[op]);
10942: return(0);
10943: }
10945: /*@
10946: MatHasOperation - Determines whether the given matrix supports the particular
10947: operation.
10949: Not Collective
10951: Input Parameters:
10952: + mat - the matrix
10953: - op - the operation, for example, MATOP_GET_DIAGONAL
10955: Output Parameter:
10956: . has - either PETSC_TRUE or PETSC_FALSE
10958: Level: advanced
10960: Notes:
10961: See the file include/petscmat.h for a complete list of matrix
10962: operations, which all have the form MATOP_<OPERATION>, where
10963: <OPERATION> is the name (in all capital letters) of the
10964: user-level routine. E.g., MatNorm() -> MATOP_NORM.
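   Example of Usage (a minimal sketch; x and y are illustrative vectors of compatible sizes; error checking
   with CHKERRQ() is omitted):
.vb
   PetscBool has;
   MatHasOperation(mat,MATOP_MULT_TRANSPOSE,&has);
   if (has) {
     MatMultTranspose(mat,x,y);
   }
.ve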
10966: .keywords: matrix, has, operation
10968: .seealso: MatCreateShell()
10969: @*/
10970: PetscErrorCode MatHasOperation(Mat mat,MatOperation op,PetscBool *has)
10971: {
10978: if (mat->ops->hasoperation) {
10979: (*mat->ops->hasoperation)(mat,op,has);
10980: } else {
10981: if (((void**)mat->ops)[op]) *has = PETSC_TRUE;
10982: else {
10983: *has = PETSC_FALSE;
10984: if (op == MATOP_CREATE_SUBMATRIX) {
10985: PetscMPIInt size;
10987: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
10988: if (size == 1) {
10989: MatHasOperation(mat,MATOP_CREATE_SUBMATRICES,has);
10990: }
10991: }
10992: }
10993: }
10994: return(0);
10995: }
10997: /*@
10998: MatHasCongruentLayouts - Determines whether the row and column layouts
10999: of the matrix are congruent
11001: Collective on mat
11003: Input Parameters:
11004: . mat - the matrix
11006: Output Parameter:
11007: . cong - either PETSC_TRUE or PETSC_FALSE
11009: Level: beginner
11013: .keywords: matrix, has
11015: .seealso: MatCreate(), MatSetSizes()
11016: @*/
11017: PetscErrorCode MatHasCongruentLayouts(Mat mat,PetscBool *cong)
11018: {
11025: if (!mat->rmap || !mat->cmap) {
11026: *cong = mat->rmap == mat->cmap ? PETSC_TRUE : PETSC_FALSE;
11027: return(0);
11028: }
11029: if (mat->congruentlayouts == PETSC_DECIDE) { /* first time we compare rows and cols layouts */
11030: PetscLayoutCompare(mat->rmap,mat->cmap,cong);
11031: if (*cong) mat->congruentlayouts = 1;
11032: else mat->congruentlayouts = 0;
11033: } else *cong = mat->congruentlayouts ? PETSC_TRUE : PETSC_FALSE;
11034: return(0);
11035: }