Actual source code: matrix.c
petsc-3.6.1 2015-08-06
2: /*
3: This is where the abstract matrix operations are defined
4: */
6: #include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
7: #include <petsc/private/vecimpl.h>
8: #include <petsc/private/isimpl.h>
10: /* Logging support */
11: PetscClassId MAT_CLASSID;
12: PetscClassId MAT_COLORING_CLASSID;
13: PetscClassId MAT_FDCOLORING_CLASSID;
14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_GetSubMatrices, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_GetSubMatrix;
24: PetscLogEvent MAT_TransposeColoringCreate;
25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
34: PetscLogEvent MAT_GetMultiProcBlock;
35: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
36: PetscLogEvent MAT_ViennaCLCopyToGPU;
37: PetscLogEvent MAT_Merge,MAT_Residual;
38: PetscLogEvent Mat_Coloring_Apply,Mat_Coloring_Comm,Mat_Coloring_Local,Mat_Coloring_ISCreate,Mat_Coloring_SetUp,Mat_Coloring_Weights;
40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};
44: /*@
45: MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.
47: Logically Collective on Mat
49: Input Parameters:
50: + x - the matrix
51: - rctx - the random number context, formed by PetscRandomCreate(), or NULL and
52: it will create one internally.
54: Output Parameter:
55: . x - the matrix
57: Example of Usage:
58: .vb
59: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
60: MatSetRandom(x,rctx);
61: PetscRandomDestroy(&rctx);
62: .ve
64: Level: intermediate
66: Concepts: matrix^setting to random
67: Concepts: random^matrix
69: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
70: @*/
71: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
72: {
74: PetscRandom randObj = NULL;
81: if (!rctx) {
82: MPI_Comm comm;
83: PetscObjectGetComm((PetscObject)x,&comm);
84: PetscRandomCreate(comm,&randObj);
85: PetscRandomSetFromOptions(randObj);
86: rctx = randObj;
87: }
89: PetscLogEventBegin(VEC_SetRandom,x,rctx,0,0);
90: (*x->ops->setrandom)(x,rctx);
91: PetscLogEventEnd(VEC_SetRandom,x,rctx,0,0);
93: x->assembled = PETSC_TRUE;
94: PetscRandomDestroy(&randObj);
95: return(0);
96: }
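/*
   Editor's illustrative sketch, not part of the original PETSc source: one way MatSetRandom()
   might be used to fill an already created and preallocated matrix with random entries. The
   helper name is hypothetical and error checking (CHKERRQ) is omitted for brevity, as in the
   rest of this listing.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatSetRandom(Mat A)
{
  PetscRandom rctx;

  /* create and configure a random number context on the matrix's communicator */
  PetscRandomCreate(PetscObjectComm((PetscObject)A),&rctx);
  PetscRandomSetFromOptions(rctx);
  /* fill the (preallocated) nonzero locations of A with random values */
  MatSetRandom(A,rctx);
  PetscRandomDestroy(&rctx);
  return(0);
}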
101: /*@
102: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
104: Input Parameter:
105: . mat - the matrix
107: Output Parameter:
108: . keptrows - the rows that are not completely zero
110: Level: intermediate
112: @*/
113: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
114: {
119: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
120: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
121: if (!mat->ops->findnonzerorows) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not coded for this matrix type");
122: (*mat->ops->findnonzerorows)(mat,keptrows);
123: return(0);
124: }
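/*
   Editor's illustrative sketch, not part of the original PETSc source: locating the rows of an
   assembled matrix that contain at least one nonzero and reporting how many there are locally.
   The helper name is hypothetical; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatFindNonzeroRows(Mat A)
{
  IS       keptrows;
  PetscInt nlocal;

  MatFindNonzeroRows(A,&keptrows);
  if (keptrows) {                      /* some implementations may return NULL when every row is kept */
    ISGetLocalSize(keptrows,&nlocal);
    PetscPrintf(PETSC_COMM_SELF,"%D locally owned rows are not completely zero\n",nlocal);
  }
  ISDestroy(&keptrows);
  return(0);
}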
128: /*@
129: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
131: Not Collective
133: Input Parameters:
134: . A - the matrix
136: Output Parameters:
137: . a - the diagonal part (which is a SEQUENTIAL matrix)
139: Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
140: Use caution, as the reference count on the returned matrix is not incremented and it is used as
141: part of the containing MPI Mat's normal operation.
143: Level: advanced
145: @*/
146: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
147: {
148: PetscErrorCode ierr,(*f)(Mat,Mat*);
149: PetscMPIInt size;
155: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
156: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
157: PetscObjectQueryFunction((PetscObject)A,"MatGetDiagonalBlock_C",&f);
158: if (f) {
159: (*f)(A,a);
160: return(0);
161: } else if (size == 1) {
162: *a = A;
163: } else {
164: MatType mattype;
165: MatGetType(A,&mattype);
166: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix type %s does not support getting diagonal block",mattype);
167: }
168: return(0);
169: }
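/*
   Editor's illustrative sketch, not part of the original PETSc source: obtaining the sequential
   diagonal block of a (possibly parallel) matrix and querying its size. As noted above, the
   reference count of the returned matrix is not incremented, so the caller must not destroy it.
   The helper name is hypothetical; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatGetDiagonalBlock(Mat A)
{
  Mat      Ad;
  PetscInt m,n;

  MatGetDiagonalBlock(A,&Ad);
  MatGetSize(Ad,&m,&n);                 /* Ad is a SEQUENTIAL matrix holding the local diagonal part */
  PetscPrintf(PETSC_COMM_SELF,"diagonal block is %D x %D\n",m,n);
  return(0);
}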
173: /*@
174: MatGetTrace - Gets the trace of a matrix, i.e. the sum of its diagonal entries.
176: Collective on Mat
178: Input Parameters:
179: . mat - the matrix
181: Output Parameter:
182: . trace - the sum of the diagonal entries
184: Level: advanced
186: @*/
187: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
188: {
190: Vec diag;
193: MatCreateVecs(mat,&diag,NULL);
194: MatGetDiagonal(mat,diag);
195: VecSum(diag,trace);
196: VecDestroy(&diag);
197: return(0);
198: }
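/*
   Editor's illustrative sketch, not part of the original PETSc source: computing the trace of an
   assembled square matrix. The helper name is hypothetical; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatGetTrace(Mat A)
{
  PetscScalar trace;

  MatGetTrace(A,&trace);                /* sum of the diagonal entries of A */
  PetscPrintf(PetscObjectComm((PetscObject)A),"trace = %g\n",(double)PetscRealPart(trace));
  return(0);
}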
202: /*@
203: MatRealPart - Zeros out the imaginary part of the matrix
205: Logically Collective on Mat
207: Input Parameters:
208: . mat - the matrix
210: Level: advanced
213: .seealso: MatImaginaryPart()
214: @*/
215: PetscErrorCode MatRealPart(Mat mat)
216: {
222: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
223: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
224: if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
225: MatCheckPreallocated(mat,1);
226: (*mat->ops->realpart)(mat);
227: #if defined(PETSC_HAVE_CUSP)
228: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
229: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
230: }
231: #endif
232: #if defined(PETSC_HAVE_VIENNACL)
233: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
234: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
235: }
236: #endif
237: return(0);
238: }
242: /*@C
243: MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix
245: Collective on Mat
247: Input Parameter:
248: . mat - the matrix
250: Output Parameters:
251: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
252: - ghosts - the global indices of the ghost points
254: Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()
256: Level: advanced
258: @*/
259: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
260: {
266: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
267: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
268: if (!mat->ops->getghosts) {
269: if (nghosts) *nghosts = 0;
270: if (ghosts) *ghosts = 0;
271: } else {
272: (*mat->ops->getghosts)(mat,nghosts,ghosts);
273: }
274: return(0);
275: }
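/*
   Editor's illustrative sketch, not part of the original PETSc source: using the ghost information
   of a matrix to create a compatible ghosted vector with VecCreateGhost(), as suggested in the
   notes above. The helper name and the use of the local column size as the owned vector length
   are assumptions for the example; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatGetGhosts(Mat A,Vec *x)
{
  PetscInt       nghosts,m,n;
  const PetscInt *ghosts;

  MatGetGhosts(A,&nghosts,&ghosts);
  MatGetLocalSize(A,&m,&n);             /* n is the number of locally owned columns */
  VecCreateGhost(PetscObjectComm((PetscObject)A),n,PETSC_DECIDE,nghosts,ghosts,x);
  return(0);
}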
280: /*@
281: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
283: Logically Collective on Mat
285: Input Parameters:
286: . mat - the matrix
288: Level: advanced
291: .seealso: MatRealPart()
292: @*/
293: PetscErrorCode MatImaginaryPart(Mat mat)
294: {
300: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
301: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
302: if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
303: MatCheckPreallocated(mat,1);
304: (*mat->ops->imaginarypart)(mat);
305: #if defined(PETSC_HAVE_CUSP)
306: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
307: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
308: }
309: #endif
310: #if defined(PETSC_HAVE_VIENNACL)
311: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
312: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
313: }
314: #endif
315: return(0);
316: }
320: /*@
321: MatMissingDiagonal - Determines if a sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
323: Collective on Mat
325: Input Parameter:
326: . mat - the matrix
328: Output Parameters:
329: + missing - is any diagonal missing
330: - dd - first diagonal entry that is missing (optional)
332: Level: advanced
335: .seealso: MatRealPart()
336: @*/
337: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
338: {
344: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
345: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
346: if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
347: (*mat->ops->missingdiagonal)(mat,missing,dd);
348: return(0);
349: }
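/*
   Editor's illustrative sketch, not part of the original PETSc source: checking whether any
   diagonal entry is absent from the nonzero structure of an assembled matrix. The helper name
   is hypothetical; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatMissingDiagonal(Mat A)
{
  PetscBool missing;
  PetscInt  dd;

  MatMissingDiagonal(A,&missing,&dd);
  if (missing) {
    PetscPrintf(PETSC_COMM_SELF,"first missing diagonal entry is in row %D\n",dd);
  }
  return(0);
}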
353: /*@C
354: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
355: for each row that you get to ensure that your application does
356: not bleed memory.
358: Not Collective
360: Input Parameters:
361: + mat - the matrix
362: - row - the row to get
364: Output Parameters:
365: + ncols - if not NULL, the number of nonzeros in the row
366: . cols - if not NULL, the column numbers
367: - vals - if not NULL, the values
369: Notes:
370: This routine is provided for people who need to have direct access
371: to the structure of a matrix. We hope that we provide enough
372: high-level matrix routines that few users will need it.
374: MatGetRow() always returns 0-based column indices, regardless of
375: whether the internal representation is 0-based (default) or 1-based.
377: For better efficiency, set cols and/or vals to NULL if you do
378: not wish to extract these quantities.
380: The user can only examine the values extracted with MatGetRow();
381: the values cannot be altered. To change the matrix entries, one
382: must use MatSetValues().
384: You can only have one call to MatGetRow() outstanding for a particular
385: matrix at a time, per processor. MatGetRow() can only obtain rows
386: associated with the given processor, it cannot get rows from the
387: other processors; for that we suggest using MatGetSubMatrices(), then
388: MatGetRow() on the submatrix. The row index passed to MatGetRow()
389: is in the global numbering of rows.
391: Fortran Notes:
392: The calling sequence from Fortran is
393: .vb
394: MatGetRow(matrix,row,ncols,cols,values,ierr)
395: Mat matrix (input)
396: integer row (input)
397: integer ncols (output)
398: integer cols(maxcols) (output)
399: double precision (or double complex) values(maxcols) output
400: .ve
401: where maxcols >= maximum nonzeros in any row of the matrix.
404: Caution:
405: Do not try to change the contents of the output arrays (cols and vals).
406: In some cases, this may corrupt the matrix.
408: Level: advanced
410: Concepts: matrices^row access
412: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatGetSubMatrices(), MatGetDiagonal()
413: @*/
414: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
415: {
417: PetscInt incols;
422: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
423: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
424: if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
425: MatCheckPreallocated(mat,1);
426: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
427: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
428: if (ncols) *ncols = incols;
429: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
430: return(0);
431: }
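/*
   Editor's illustrative sketch, not part of the original PETSc source: iterating over the locally
   owned rows with MatGetRow()/MatRestoreRow() to count the local nonzeros. The helper name is
   hypothetical; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatGetRow(Mat A)
{
  PetscInt row,rstart,rend,ncols,nztotal = 0;

  MatGetOwnershipRange(A,&rstart,&rend);
  for (row=rstart; row<rend; row++) {
    /* only the count is needed here, so cols and vals are passed as NULL */
    MatGetRow(A,row,&ncols,NULL,NULL);
    nztotal += ncols;
    MatRestoreRow(A,row,&ncols,NULL,NULL);
  }
  PetscPrintf(PETSC_COMM_SELF,"local number of nonzeros: %D\n",nztotal);
  return(0);
}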
435: /*@
436: MatConjugate - replaces the matrix values with their complex conjugates
438: Logically Collective on Mat
440: Input Parameters:
441: . mat - the matrix
443: Level: advanced
445: .seealso: VecConjugate()
446: @*/
447: PetscErrorCode MatConjugate(Mat mat)
448: {
449: #if defined(PETSC_USE_COMPLEX)
454: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
455: if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
456: (*mat->ops->conjugate)(mat);
457: #if defined(PETSC_HAVE_CUSP)
458: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
459: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
460: }
461: #endif
462: #if defined(PETSC_HAVE_VIENNACL)
463: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
464: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
465: }
466: #endif
467: return(0);
468: #else
469: return 0;
470: #endif
471: }
475: /*@C
476: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
478: Not Collective
480: Input Parameters:
481: + mat - the matrix
482: . row - the row to get
483: . ncols, cols - the number of nonzeros and their columns
484: - vals - if nonzero the column values
486: Notes:
487: This routine should be called after you have finished examining the entries.
489: This routine zeros out ncols, cols, and vals. This is to prevent accidental
490: use of the array after it has been restored. If you pass NULL, it will
491: not zero the pointers. Use of cols or vals after MatRestoreRow is invalid.
493: Fortran Notes:
494: The calling sequence from Fortran is
495: .vb
496: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
497: Mat matrix (input)
498: integer row (input)
499: integer ncols (output)
500: integer cols(maxcols) (output)
501: double precision (or double complex) values(maxcols) output
502: .ve
503: Where maxcols >= maximum nonzeros in any row of the matrix.
505: In Fortran MatRestoreRow() MUST be called after MatGetRow()
506: before another call to MatGetRow() can be made.
508: Level: advanced
510: .seealso: MatGetRow()
511: @*/
512: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
513: {
519: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
520: if (!mat->ops->restorerow) return(0);
521: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
522: if (ncols) *ncols = 0;
523: if (cols) *cols = NULL;
524: if (vals) *vals = NULL;
525: return(0);
526: }
530: /*@
531: MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrices in MATSBAIJ format.
532: You should call MatRestoreRowUpperTriangular() after your calls to MatGetRow()/MatRestoreRow() to disable the flag.
534: Not Collective
536: Input Parameter:
537: . mat - the matrix
539: Notes:
540: The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
542: Level: advanced
544: Concepts: matrices^row access
546: .seealso: MatRestoreRowUpperTriangular()
547: @*/
548: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
549: {
555: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
556: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
557: if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
558: MatCheckPreallocated(mat,1);
559: (*mat->ops->getrowuppertriangular)(mat);
560: return(0);
561: }
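/*
   Editor's illustrative sketch, not part of the original PETSc source: bracketing MatGetRow()
   calls on a MATSBAIJ matrix with MatGetRowUpperTriangular()/MatRestoreRowUpperTriangular(),
   as required for that format. Only the upper triangular part of each row is returned.
   The helper name is hypothetical; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatGetRowUpperTriangular(Mat S)
{
  PetscInt          row,rstart,rend,ncols;
  const PetscInt    *cols;
  const PetscScalar *vals;

  MatGetRowUpperTriangular(S);
  MatGetOwnershipRange(S,&rstart,&rend);
  for (row=rstart; row<rend; row++) {
    MatGetRow(S,row,&ncols,&cols,&vals);
    /* ... examine the upper triangular entries of this row ... */
    MatRestoreRow(S,row,&ncols,&cols,&vals);
  }
  MatRestoreRowUpperTriangular(S);
  return(0);
}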
565: /*@
566: MatRestoreRowUpperTriangular - Disable calls to MatGetRow() for matrix in MATSBAIJ format.
568: Not Collective
570: Input Parameter:
571: . mat - the matrix
573: Notes:
574: This routine should be called after you have finished your calls to MatGetRow()/MatRestoreRow().
577: Level: advanced
579: .seealso: MatGetRowUpperTriangular()
580: @*/
581: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
582: {
587: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
588: if (!mat->ops->restorerowuppertriangular) return(0);
589: (*mat->ops->restorerowuppertriangular)(mat);
590: return(0);
591: }
595: /*@C
596: MatSetOptionsPrefix - Sets the prefix used for searching for all
597: Mat options in the database.
599: Logically Collective on Mat
601: Input Parameters:
602: + A - the Mat context
603: - prefix - the prefix to prepend to all option names
605: Notes:
606: A hyphen (-) must NOT be given at the beginning of the prefix name.
607: The first character of all runtime options is AUTOMATICALLY the hyphen.
609: Level: advanced
611: .keywords: Mat, set, options, prefix, database
613: .seealso: MatSetFromOptions()
614: @*/
615: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
616: {
621: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
622: return(0);
623: }
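/*
   Editor's illustrative sketch, not part of the original PETSc source: attaching an options prefix
   to a matrix so that command line options such as -mymat_mat_type can configure it through
   MatSetFromOptions(). The prefix "mymat_" and the helper name are assumptions for the example;
   error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatSetOptionsPrefix(Mat A)
{
  MatSetOptionsPrefix(A,"mymat_");      /* note: no leading hyphen in the prefix */
  MatSetFromOptions(A);                 /* now honors e.g. -mymat_mat_type aij */
  return(0);
}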
627: /*@C
628: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
629: Mat options in the database.
631: Logically Collective on Mat
633: Input Parameters:
634: + A - the Mat context
635: - prefix - the prefix to prepend to all option names
637: Notes:
638: A hyphen (-) must NOT be given at the beginning of the prefix name.
639: The first character of all runtime options is AUTOMATICALLY the hyphen.
641: Level: advanced
643: .keywords: Mat, append, options, prefix, database
645: .seealso: MatGetOptionsPrefix()
646: @*/
647: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
648: {
653: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
654: return(0);
655: }
659: /*@C
660: MatGetOptionsPrefix - Gets the prefix used for searching for all
661: Mat options in the database.
663: Not Collective
665: Input Parameter:
666: . A - the Mat context
668: Output Parameter:
669: . prefix - pointer to the prefix string used
671: Notes: On the Fortran side, the user should pass in a string 'prefix' of
672: sufficient length to hold the prefix.
674: Level: advanced
676: .keywords: Mat, get, options, prefix, database
678: .seealso: MatAppendOptionsPrefix()
679: @*/
680: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
681: {
686: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
687: return(0);
688: }
692: /*@
693: MatSetUp - Sets up the internal matrix data structures for later use.
695: Collective on Mat
697: Input Parameters:
698: . A - the Mat context
700: Notes:
701: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
703: If a suitable preallocation routine is used, this function does not need to be called.
705: See the Performance chapter of the PETSc users manual for how to preallocate matrices
707: Level: beginner
709: .keywords: Mat, setup
711: .seealso: MatCreate(), MatDestroy()
712: @*/
713: PetscErrorCode MatSetUp(Mat A)
714: {
715: PetscMPIInt size;
720: if (!((PetscObject)A)->type_name) {
721: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
722: if (size == 1) {
723: MatSetType(A, MATSEQAIJ);
724: } else {
725: MatSetType(A, MATMPIAIJ);
726: }
727: }
728: if (!A->preallocated && A->ops->setup) {
729: PetscInfo(A,"Warning not preallocating matrix storage\n");
730: (*A->ops->setup)(A);
731: }
732: A->preallocated = PETSC_TRUE;
733: return(0);
734: }
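/*
   Editor's illustrative sketch, not part of the original PETSc source: the typical creation
   sequence in which MatSetUp() is called when no explicit preallocation routine is used.
   The helper name and the square size n are assumptions for the example; error checking is
   omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatSetUp(MPI_Comm comm,PetscInt n,Mat *A)
{
  MatCreate(comm,A);
  MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,n,n);
  MatSetFromOptions(*A);
  MatSetUp(*A);                         /* uses a default (possibly inefficient) preallocation */
  /* ... MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() may now be called ... */
  return(0);
}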
736: #if defined(PETSC_HAVE_SAWS)
737: #include <petscviewersaws.h>
738: #endif
741: /*@C
742: MatView - Visualizes a matrix object.
744: Collective on Mat
746: Input Parameters:
747: + mat - the matrix
748: - viewer - visualization context
750: Notes:
751: The available visualization contexts include
752: + PETSC_VIEWER_STDOUT_SELF - for sequential matrices
753: . PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
754: . PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
755: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
757: The user can open alternative visualization contexts with
758: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
759: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
760: specified file; corresponding input uses MatLoad()
761: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
762: an X window display
763: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
764: Currently only the sequential dense and AIJ
765: matrix types support the Socket viewer.
767: The user can call PetscViewerSetFormat() to specify the output
768: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
769: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
770: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
771: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
772: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
773: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
774: format common among all matrix types
775: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
776: format (which is in many cases the same as the default)
777: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
778: size and structure (not the matrix entries)
779: . PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
780: the matrix structure
782: Options Database Keys:
783: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
784: . -mat_view ::ascii_info_detail - Prints more detailed info
785: . -mat_view - Prints matrix in ASCII format
786: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
787: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
788: . -display <name> - Sets display name (default is host)
789: . -draw_pause <sec> - Sets number of seconds to pause after display
790: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 11 Using MATLAB with PETSc for details)
791: . -viewer_socket_machine <machine> -
792: . -viewer_socket_port <port> -
793: . -mat_view binary - save matrix to file in binary format
794: - -viewer_binary_filename <name> -
795: Level: beginner
797: Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
798: viewer is used.
800: See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
801: viewer is used.
803: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
804: And then use the following mouse functions:
805: left mouse: zoom in
806: middle mouse: zoom out
807: right mouse: continue with the simulation
809: Concepts: matrices^viewing
810: Concepts: matrices^plotting
811: Concepts: matrices^printing
813: .seealso: PetscViewerSetFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
814: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
815: @*/
816: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
817: {
818: PetscErrorCode ierr;
819: PetscInt rows,cols,rbs,cbs;
820: PetscBool iascii;
821: PetscViewerFormat format;
822: #if defined(PETSC_HAVE_SAWS)
823: PetscBool issaws;
824: #endif
829: if (!viewer) {
830: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
831: }
834: MatCheckPreallocated(mat,1);
836: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
837: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
838: PetscViewerGetFormat(viewer,&format);
839: if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
840: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detailed");
841: }
843: #if defined(PETSC_HAVE_SAWS)
844: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
845: #endif
846: if (iascii) {
847: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
848: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
849: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
850: PetscViewerASCIIPushTab(viewer);
851: MatGetSize(mat,&rows,&cols);
852: MatGetBlockSizes(mat,&rbs,&cbs);
853: if (rbs != 1 || cbs != 1) {
854: if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
855: else {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
856: } else {
857: PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
858: }
859: if (mat->factortype) {
860: const MatSolverPackage solver;
861: MatFactorGetSolverPackage(mat,&solver);
862: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
863: }
864: if (mat->ops->getinfo) {
865: MatInfo info;
866: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
867: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%g, allocated nonzeros=%g\n",info.nz_used,info.nz_allocated);
868: PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
869: }
870: if (mat->nullsp) {PetscViewerASCIIPrintf(viewer," has attached null space\n");}
871: if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer," has attached near null space\n");}
872: }
873: #if defined(PETSC_HAVE_SAWS)
874: } else if (issaws) {
875: PetscMPIInt rank;
877: PetscObjectName((PetscObject)mat);
878: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
879: if (!((PetscObject)mat)->amsmem && !rank) {
880: PetscObjectViewSAWs((PetscObject)mat,viewer);
881: }
882: #endif
883: }
884: if (mat->ops->view) {
885: PetscViewerASCIIPushTab(viewer);
886: (*mat->ops->view)(mat,viewer);
887: PetscViewerASCIIPopTab(viewer);
888: }
889: if (iascii) {
890: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
891: PetscViewerGetFormat(viewer,&format);
892: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
893: PetscViewerASCIIPopTab(viewer);
894: }
895: }
896: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
897: return(0);
898: }
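/*
   Editor's illustrative sketch, not part of the original PETSc source: printing summary
   information about a matrix to standard output and saving it to a binary file that can later
   be read with MatLoad(). The file name "matrix.dat" and the helper name are assumptions for
   the example; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatView(Mat A)
{
  PetscViewer viewer;

  /* ASCII summary (size, nonzeros, ...) rather than the full entries */
  PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);
  MatView(A,PETSC_VIEWER_STDOUT_WORLD);
  PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD);

  /* binary dump, readable later with MatLoad() */
  PetscViewerBinaryOpen(PetscObjectComm((PetscObject)A),"matrix.dat",FILE_MODE_WRITE,&viewer);
  MatView(A,viewer);
  PetscViewerDestroy(&viewer);
  return(0);
}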
900: #if defined(PETSC_USE_DEBUG)
901: #include <../src/sys/totalview/tv_data_display.h>
902: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
903: {
904: TV_add_row("Local rows", "int", &mat->rmap->n);
905: TV_add_row("Local columns", "int", &mat->cmap->n);
906: TV_add_row("Global rows", "int", &mat->rmap->N);
907: TV_add_row("Global columns", "int", &mat->cmap->N);
908: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
909: return TV_format_OK;
910: }
911: #endif
915: /*@C
916: MatLoad - Loads a matrix that has been stored in binary format
917: with MatView(). The matrix format is determined from the options database.
918: Generates a parallel MPI matrix if the communicator has more than one
919: processor. The default matrix type is AIJ.
921: Collective on PetscViewer
923: Input Parameters:
924: + newmat - the newly loaded matrix, this needs to have been created with MatCreate()
925: or some related function before a call to MatLoad()
926: - viewer - binary file viewer, created with PetscViewerBinaryOpen()
928: Options Database Keys:
929: Used with block matrix formats (MATSEQBAIJ, ...) to specify
930: block size
931: . -matload_block_size <bs>
933: Level: beginner
935: Notes:
936: If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
937: Mat before calling this routine if you wish to set it from the options database.
939: MatLoad() automatically loads into the options database any options
940: given in the file filename.info where filename is the name of the file
941: that was passed to the PetscViewerBinaryOpen(). The options in the info
942: file will be ignored if you use the -viewer_binary_skip_info option.
944: If the type or size of newmat is not set before a call to MatLoad, PETSc
945: sets the default matrix type AIJ and sets the local and global sizes.
946: If the type and/or size are already set, then those are used.
948: In parallel, each processor can load a subset of rows (or the
949: entire matrix). This routine is especially useful when a large
950: matrix is stored on disk and only part of it is desired on each
951: processor. For example, a parallel solver may access only some of
952: the rows from each processor. The algorithm used here reads
953: relatively small blocks of data rather than reading the entire
954: matrix and then subsetting it.
956: Notes for advanced users:
957: Most users should not need to know the details of the binary storage
958: format, since MatLoad() and MatView() completely hide these details.
959: But for anyone who's interested, the standard binary matrix storage
960: format is
962: $ int MAT_FILE_CLASSID
963: $ int number of rows
964: $ int number of columns
965: $ int total number of nonzeros
966: $ int *number nonzeros in each row
967: $ int *column indices of all nonzeros (starting index is zero)
968: $ PetscScalar *values of all nonzeros
970: PETSc automatically does the byte swapping for
971: machines that store the bytes reversed, e.g. DEC alpha, freebsd,
972: linux, Windows and the paragon; thus if you write your own binary
973: read/write routines you have to swap the bytes; see PetscBinaryRead()
974: and PetscBinaryWrite() to see how this may be done.
976: .keywords: matrix, load, binary, input
978: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()
980: @*/
981: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
982: {
984: PetscBool isbinary,flg;
989: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
990: if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");
992: if (!((PetscObject)newmat)->type_name) {
993: MatSetType(newmat,MATAIJ);
994: }
996: if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
997: PetscLogEventBegin(MAT_Load,viewer,0,0,0);
998: (*newmat->ops->load)(newmat,viewer);
999: PetscLogEventEnd(MAT_Load,viewer,0,0,0);
1001: flg = PETSC_FALSE;
1002: PetscOptionsGetBool(((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1003: if (flg) {
1004: MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1005: MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1006: }
1007: flg = PETSC_FALSE;
1008: PetscOptionsGetBool(((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1009: if (flg) {
1010: MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1011: }
1012: return(0);
1013: }
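/*
   Editor's illustrative sketch, not part of the original PETSc source: loading a matrix from a
   binary file that was previously written with MatView(). The file name "matrix.dat" and the
   helper name are assumptions for the example; error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatLoad(MPI_Comm comm,Mat *A)
{
  PetscViewer viewer;

  MatCreate(comm,A);
  MatSetType(*A,MATAIJ);                /* optional; MATAIJ is also the default for MatLoad() */
  PetscViewerBinaryOpen(comm,"matrix.dat",FILE_MODE_READ,&viewer);
  MatLoad(*A,viewer);
  PetscViewerDestroy(&viewer);
  return(0);
}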
1017: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1018: {
1020: Mat_Redundant *redund = *redundant;
1021: PetscInt i;
1024: if (redund){
1025: if (redund->matseq) { /* via MatGetSubMatrices() */
1026: ISDestroy(&redund->isrow);
1027: ISDestroy(&redund->iscol);
1028: MatDestroy(&redund->matseq[0]);
1029: PetscFree(redund->matseq);
1030: } else {
1031: PetscFree2(redund->send_rank,redund->recv_rank);
1032: PetscFree(redund->sbuf_j);
1033: PetscFree(redund->sbuf_a);
1034: for (i=0; i<redund->nrecvs; i++) {
1035: PetscFree(redund->rbuf_j[i]);
1036: PetscFree(redund->rbuf_a[i]);
1037: }
1038: PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1039: }
1041: if (redund->subcomm) {
1042: PetscCommDestroy(&redund->subcomm);
1043: }
1044: PetscFree(redund);
1045: }
1046: return(0);
1047: }
1051: /*@
1052: MatDestroy - Frees space taken by a matrix.
1054: Collective on Mat
1056: Input Parameter:
1057: . A - the matrix
1059: Level: beginner
1061: @*/
1062: PetscErrorCode MatDestroy(Mat *A)
1063: {
1067: if (!*A) return(0);
1069: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}
1071: /* if memory was published with SAWs then destroy it */
1072: PetscObjectSAWsViewOff((PetscObject)*A);
1073: if ((*A)->ops->destroy) {
1074: (*(*A)->ops->destroy)(*A);
1075: }
1076: MatDestroy_Redundant(&(*A)->redundant);
1077: MatNullSpaceDestroy(&(*A)->nullsp);
1078: MatNullSpaceDestroy(&(*A)->transnullsp);
1079: MatNullSpaceDestroy(&(*A)->nearnullsp);
1080: PetscLayoutDestroy(&(*A)->rmap);
1081: PetscLayoutDestroy(&(*A)->cmap);
1082: PetscHeaderDestroy(A);
1083: return(0);
1084: }
1088: /*@
1089: MatSetValues - Inserts or adds a block of values into a matrix.
1090: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1091: MUST be called after all calls to MatSetValues() have been completed.
1093: Not Collective
1095: Input Parameters:
1096: + mat - the matrix
1097: . v - a logically two-dimensional array of values
1098: . m, idxm - the number of rows and their global indices
1099: . n, idxn - the number of columns and their global indices
1100: - addv - either ADD_VALUES or INSERT_VALUES, where
1101: ADD_VALUES adds values to any existing entries, and
1102: INSERT_VALUES replaces existing entries with new values
1104: Notes:
1105: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1106: MatSetUp() before using this routine
1108: By default the values, v, are row-oriented. See MatSetOption() for other options.
1110: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1111: options cannot be mixed without intervening calls to the assembly
1112: routines.
1114: MatSetValues() uses 0-based row and column numbers in Fortran
1115: as well as in C.
1117: Negative indices may be passed in idxm and idxn, these rows and columns are
1118: simply ignored. This allows easily inserting element stiffness matrices
1119: with homogeneous Dirichlet boundary conditions that you don't want represented
1120: in the matrix.
1122: Efficiency Alert:
1123: The routine MatSetValuesBlocked() may offer much better efficiency
1124: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1126: Level: beginner
1128: Concepts: matrices^putting entries in
1130: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1131: InsertMode, INSERT_VALUES, ADD_VALUES
1132: @*/
1133: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1134: {
1136: #if defined(PETSC_USE_DEBUG)
1137: PetscInt i,j;
1138: #endif
1143: if (!m || !n) return(0); /* no values to insert */
1147: MatCheckPreallocated(mat,1);
1148: if (mat->insertmode == NOT_SET_VALUES) {
1149: mat->insertmode = addv;
1150: }
1151: #if defined(PETSC_USE_DEBUG)
1152: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1153: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1154: if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1156: for (i=0; i<m; i++) {
1157: for (j=0; j<n; j++) {
1158: if (mat->erroriffpe && PetscIsInfOrNanScalar(v[i*n+j]))
1159: #if defined(PETSC_USE_COMPLEX)
1160: SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1161: #else
1162: SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1163: #endif
1164: }
1165: }
1166: #endif
1168: if (mat->assembled) {
1169: mat->was_assembled = PETSC_TRUE;
1170: mat->assembled = PETSC_FALSE;
1171: }
1172: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1173: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1174: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1175: #if defined(PETSC_HAVE_CUSP)
1176: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1177: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1178: }
1179: #endif
1180: #if defined(PETSC_HAVE_VIENNACL)
1181: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1182: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1183: }
1184: #endif
1185: return(0);
1186: }
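/*
   Editor's illustrative sketch, not part of the original PETSc source: inserting a small 2x2
   element-stiffness-like block with MatSetValues() and then assembling. It assumes the matrix
   has already been preallocated (or MatSetUp() has been called); the helper name and the indices
   are for illustration only, and error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatSetValues(Mat A)
{
  PetscInt    idxm[2] = {0,1},idxn[2] = {0,1};
  PetscScalar v[4]    = {4.0,-1.0,-1.0,4.0};   /* row-oriented: v[i*2+j] goes to (idxm[i],idxn[j]) */

  MatSetValues(A,2,idxm,2,idxn,v,ADD_VALUES);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  return(0);
}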
1191: /*@
1192: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1193: values into a matrix
1195: Not Collective
1197: Input Parameters:
1198: + mat - the matrix
1199: . row - the (block) row to set
1200: - v - a logically two-dimensional array of values
1202: Notes:
1203: The values, v, are column-oriented (for the block version) and sorted.
1205: All the nonzeros in the row must be provided
1207: The matrix must have previously had its column indices set
1209: The row must belong to this process
1211: Level: intermediate
1213: Concepts: matrices^putting entries in
1215: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1216: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1217: @*/
1218: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1219: {
1221: PetscInt globalrow;
1227: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1228: MatSetValuesRow(mat,globalrow,v);
1229: #if defined(PETSC_HAVE_CUSP)
1230: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1231: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1232: }
1233: #endif
1234: #if defined(PETSC_HAVE_VIENNACL)
1235: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1236: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1237: }
1238: #endif
1239: return(0);
1240: }
1244: /*@
1245: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1246: values into a matrix
1248: Not Collective
1250: Input Parameters:
1251: + mat - the matrix
1252: . row - the (block) row to set
1253: - v - a logically two-dimensional array of values
1255: Notes:
1256: The values, v, are column-oriented for the block version.
1258: All the nonzeros in the row must be provided
1260: THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED; usually MatSetValues() is used.
1262: The row must belong to this process
1264: Level: advanced
1266: Concepts: matrices^putting entries in
1268: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1269: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1270: @*/
1271: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1272: {
1278: MatCheckPreallocated(mat,1);
1280: #if defined(PETSC_USE_DEBUG)
1281: if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1282: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1283: #endif
1284: mat->insertmode = INSERT_VALUES;
1286: if (mat->assembled) {
1287: mat->was_assembled = PETSC_TRUE;
1288: mat->assembled = PETSC_FALSE;
1289: }
1290: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1291: if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1292: (*mat->ops->setvaluesrow)(mat,row,v);
1293: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1294: #if defined(PETSC_HAVE_CUSP)
1295: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1296: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1297: }
1298: #endif
1299: #if defined(PETSC_HAVE_VIENNACL)
1300: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1301: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1302: }
1303: #endif
1304: return(0);
1305: }
1309: /*@
1310: MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1311: Using structured grid indexing
1313: Not Collective
1315: Input Parameters:
1316: + mat - the matrix
1317: . m - number of rows being entered
1318: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1319: . n - number of columns being entered
1320: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1321: . v - a logically two-dimensional array of values
1322: - addv - either ADD_VALUES or INSERT_VALUES, where
1323: ADD_VALUES adds values to any existing entries, and
1324: INSERT_VALUES replaces existing entries with new values
1326: Notes:
1327: By default the values, v, are row-oriented. See MatSetOption() for other options.
1329: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1330: options cannot be mixed without intervening calls to the assembly
1331: routines.
1333: The grid coordinates are across the entire grid, not just the local portion
1335: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1336: as well as in C.
1338: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1340: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1341: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1343: The columns and rows in the stencil passed in MUST be contained within the
1344: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1345: if you create a DMDA with an overlap of one grid level and on a particular process its first
1346: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1347: first i index you can use in your column and row indices in MatSetStencil() is 5.
1349: In Fortran idxm and idxn should be declared as
1350: $ MatStencil idxm(4,m),idxn(4,n)
1351: and the values inserted using
1352: $ idxm(MatStencil_i,1) = i
1353: $ idxm(MatStencil_j,1) = j
1354: $ idxm(MatStencil_k,1) = k
1355: $ idxm(MatStencil_c,1) = c
1356: etc
1358: For periodic boundary conditions, use negative indices for values to the left of the first entry (below 0); these are
1359: obtained by wrapping around from the right edge. For values to the right of the last entry, use that last index plus one,
1360: etc.; these are obtained by wrapping around from the left edge. This works only with the
1361: DM_BOUNDARY_PERIODIC boundary type.
1363: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1364: a single value per point) you can skip filling those indices.
1366: Inspired by the structured grid interface to the HYPRE package
1367: (http://www.llnl.gov/CASC/hypre)
1369: Efficiency Alert:
1370: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1371: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
1373: Level: beginner
1375: Concepts: matrices^putting entries in
1377: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1378: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1379: @*/
1380: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1381: {
1383: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1384: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1385: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1388: if (!m || !n) return(0); /* no values to insert */
1395: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1396: jdxm = buf; jdxn = buf+m;
1397: } else {
1398: PetscMalloc2(m,&bufm,n,&bufn);
1399: jdxm = bufm; jdxn = bufn;
1400: }
1401: for (i=0; i<m; i++) {
1402: for (j=0; j<3-sdim; j++) dxm++;
1403: tmp = *dxm++ - starts[0];
1404: for (j=0; j<dim-1; j++) {
1405: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1406: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1407: }
1408: if (mat->stencil.noc) dxm++;
1409: jdxm[i] = tmp;
1410: }
1411: for (i=0; i<n; i++) {
1412: for (j=0; j<3-sdim; j++) dxn++;
1413: tmp = *dxn++ - starts[0];
1414: for (j=0; j<dim-1; j++) {
1415: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1416: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1417: }
1418: if (mat->stencil.noc) dxn++;
1419: jdxn[i] = tmp;
1420: }
1421: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1422: PetscFree2(bufm,bufn);
1423: return(0);
1424: }
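/*
   Editor's illustrative sketch, not part of the original PETSc source: inserting a standard
   5-point stencil row at grid point (i,j) of a 2d scalar problem using MatSetValuesStencil().
   The matrix is assumed to have been obtained with DMCreateMatrix() (or set up with
   MatSetLocalToGlobalMapping()/MatSetStencil()); the helper name and coefficients are for
   illustration only, and error checking is omitted for brevity.
*/
PETSC_UNUSED static PetscErrorCode ExampleUsage_MatSetValuesStencil(Mat A,PetscInt i,PetscInt j)
{
  MatStencil  row,col[5];
  PetscScalar v[5];

  row.i = i;      row.j = j;
  col[0].i = i;   col[0].j = j;   v[0] =  4.0;    /* diagonal */
  col[1].i = i-1; col[1].j = j;   v[1] = -1.0;    /* west  neighbor */
  col[2].i = i+1; col[2].j = j;   v[2] = -1.0;    /* east  neighbor */
  col[3].i = i;   col[3].j = j-1; v[3] = -1.0;    /* south neighbor */
  col[4].i = i;   col[4].j = j+1; v[4] = -1.0;    /* north neighbor */
  MatSetValuesStencil(A,1,&row,5,col,v,INSERT_VALUES);
  return(0);
}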
1428: /*@
1429: MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1430: Using structured grid indexing
1432: Not Collective
1434: Input Parameters:
1435: + mat - the matrix
1436: . m - number of rows being entered
1437: . idxm - grid coordinates for matrix rows being entered
1438: . n - number of columns being entered
1439: . idxn - grid coordinates for matrix columns being entered
1440: . v - a logically two-dimensional array of values
1441: - addv - either ADD_VALUES or INSERT_VALUES, where
1442: ADD_VALUES adds values to any existing entries, and
1443: INSERT_VALUES replaces existing entries with new values
1445: Notes:
1446: By default the values, v, are row-oriented and unsorted.
1447: See MatSetOption() for other options.
1449: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1450: options cannot be mixed without intervening calls to the assembly
1451: routines.
1453: The grid coordinates are across the entire grid, not just the local portion
1455: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1456: as well as in C.
1458: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1460: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1461: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1463: The columns and rows in the stencil passed in MUST be contained within the
1464: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1465: if you create a DMDA with an overlap of one grid level and on a particular process its first
1466: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1467: first i index you can use in your column and row indices in MatSetStencil() is 5.
1469: In Fortran idxm and idxn should be declared as
1470: $ MatStencil idxm(4,m),idxn(4,n)
1471: and the values inserted using
1472: $ idxm(MatStencil_i,1) = i
1473: $ idxm(MatStencil_j,1) = j
1474: $ idxm(MatStencil_k,1) = k
1475: etc
1477: Negative indices may be passed in idxm and idxn, these rows and columns are
1478: simply ignored. This allows easily inserting element stiffness matrices
1479: with homogeneous Dirichlet boundary conditions that you don't want represented
1480: in the matrix.
1482: Inspired by the structured grid interface to the HYPRE package
1483: (http://www.llnl.gov/CASC/hypre)
1485: Level: beginner
1487: Concepts: matrices^putting entries in
1489: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1490: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1491: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1492: @*/
1493: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1494: {
1496: PetscInt buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1497: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1498: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1501: if (!m || !n) return(0); /* no values to insert */
1508: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1509: jdxm = buf; jdxn = buf+m;
1510: } else {
1511: PetscMalloc2(m,&bufm,n,&bufn);
1512: jdxm = bufm; jdxn = bufn;
1513: }
1514: for (i=0; i<m; i++) {
1515: for (j=0; j<3-sdim; j++) dxm++;
1516: tmp = *dxm++ - starts[0];
1517: for (j=0; j<sdim-1; j++) {
1518: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1519: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1520: }
1521: dxm++;
1522: jdxm[i] = tmp;
1523: }
1524: for (i=0; i<n; i++) {
1525: for (j=0; j<3-sdim; j++) dxn++;
1526: tmp = *dxn++ - starts[0];
1527: for (j=0; j<sdim-1; j++) {
1528: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1529: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1530: }
1531: dxn++;
1532: jdxn[i] = tmp;
1533: }
1534: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1535: PetscFree2(bufm,bufn);
1536: #if defined(PETSC_HAVE_CUSP)
1537: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1538: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1539: }
1540: #endif
1541: #if defined(PETSC_HAVE_VIENNACL)
1542: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1543: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1544: }
1545: #endif
1546: return(0);
1547: }
1551: /*@
1552: MatSetStencil - Sets the grid information for setting values into a matrix via
1553: MatSetValuesStencil()
1555: Not Collective
1557: Input Parameters:
1558: + mat - the matrix
1559: . dim - dimension of the grid 1, 2, or 3
1560: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1561: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1562: - dof - number of degrees of freedom per node
1565: Inspired by the structured grid interface to the HYPRE package
1566: (http://www.llnl.gov/CASC/hypre)
1568: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1569: user.
1571: Level: beginner
1573: Concepts: matrices^putting entries in
1575: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1576: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1577: @*/
1578: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1579: {
1580: PetscInt i;
1587: mat->stencil.dim = dim + (dof > 1);
1588: for (i=0; i<dim; i++) {
1589: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1590: mat->stencil.starts[i] = starts[dim-i-1];
1591: }
1592: mat->stencil.dims[dim] = dof;
1593: mat->stencil.starts[dim] = 0;
1594: mat->stencil.noc = (PetscBool)(dof == 1);
1595: return(0);
1596: }
1600: /*@
1601: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1603: Not Collective
1605: Input Parameters:
1606: + mat - the matrix
1607: . v - a logically two-dimensional array of values
1608: . m, idxm - the number of block rows and their global block indices
1609: . n, idxn - the number of block columns and their global block indices
1610: - addv - either ADD_VALUES or INSERT_VALUES, where
1611: ADD_VALUES adds values to any existing entries, and
1612: INSERT_VALUES replaces existing entries with new values
1614: Notes:
1615: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1616: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1618: The m and n count the NUMBER of blocks in the row direction and column direction,
1619: NOT the total number of rows/columns; for example, if the block size is 2 and
1620: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1621: The values in idxm would be 1 2; that is the first index for each block divided by
1622: the block size.
1624: Note that you must call MatSetBlockSize() when constructing this matrix (before
1625: preallocating it).
1627: By default the values, v, are row-oriented, so the layout of
1628: v is the same as for MatSetValues(). See MatSetOption() for other options.
1630: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1631: options cannot be mixed without intervening calls to the assembly
1632: routines.
1634: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1635: as well as in C.
1637: Negative indices may be passed in idxm and idxn, these rows and columns are
1638: simply ignored. This allows easily inserting element stiffness matrices
1639: with homogeneous Dirichlet boundary conditions that you don't want represented
1640: in the matrix.
1642: Each time an entry is set within a sparse matrix via MatSetValues(),
1643: internal searching must be done to determine where to place the
1644: data in the matrix storage space. By instead inserting blocks of
1645: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1646: reduced.
1648: Example:
1649: $ Suppose m=n=2 and block size(bs) = 2 The array is
1650: $
1651: $ 1 2 | 3 4
1652: $ 5 6 | 7 8
1653: $ - - - | - - -
1654: $ 9 10 | 11 12
1655: $ 13 14 | 15 16
1656: $
1657: $ v[] should be passed in like
1658: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1659: $
1660: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1661: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
1663: Level: intermediate
1665: Concepts: matrices^putting entries in blocked
1667: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1668: @*/
1669: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1670: {
1676: if (!m || !n) return(0); /* no values to insert */
1680: MatCheckPreallocated(mat,1);
1681: if (mat->insertmode == NOT_SET_VALUES) {
1682: mat->insertmode = addv;
1683: }
1684: #if defined(PETSC_USE_DEBUG)
1685: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1686: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1687: if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1688: #endif
1690: if (mat->assembled) {
1691: mat->was_assembled = PETSC_TRUE;
1692: mat->assembled = PETSC_FALSE;
1693: }
1694: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1695: if (mat->ops->setvaluesblocked) {
1696: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1697: } else {
1698: PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1699: PetscInt i,j,bs,cbs;
1700: MatGetBlockSizes(mat,&bs,&cbs);
1701: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1702: iidxm = buf; iidxn = buf + m*bs;
1703: } else {
1704: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1705: iidxm = bufr; iidxn = bufc;
1706: }
1707: for (i=0; i<m; i++) {
1708: for (j=0; j<bs; j++) {
1709: iidxm[i*bs+j] = bs*idxm[i] + j;
1710: }
1711: }
1712: for (i=0; i<n; i++) {
1713: for (j=0; j<cbs; j++) {
1714: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1715: }
1716: }
1717: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1718: PetscFree2(bufr,bufc);
1719: }
1720: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1721: #if defined(PETSC_HAVE_CUSP)
1722: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1723: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1724: }
1725: #endif
1726: #if defined(PETSC_HAVE_VIENNACL)
1727: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1728: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1729: }
1730: #endif
1731: return(0);
1732: }
1736: /*@
1737: MatGetValues - Gets a block of values from a matrix.
1739: Not Collective; currently only returns a local block
1741: Input Parameters:
1742: + mat - the matrix
1743: . v - a logically two-dimensional array for storing the values
1744: . m, idxm - the number of rows and their global indices
1745: - n, idxn - the number of columns and their global indices
1747: Notes:
1748: The user must allocate space (m*n PetscScalars) for the values, v.
1749: The values, v, are then returned in a row-oriented format,
1750: analogous to that used by default in MatSetValues().
1752: MatGetValues() uses 0-based row and column numbers in
1753: Fortran as well as in C.
1755: MatGetValues() requires that the matrix has been assembled
1756: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1757: MatSetValues() and MatGetValues() CANNOT be made in succession
1758: without intermediate matrix assembly.
1760: Negative row or column indices will be ignored and those locations in v[] will be
1761: left unchanged.
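
   The following is a minimal sketch; the indices are illustrative and the requested rows are assumed to be owned
   by this process.
.vb
      PetscInt    idxm[2] = {0,1},idxn[3] = {2,3,4};
      PetscScalar v[6];                        /* room for 2*3 values, stored row by row */

      MatGetValues(mat,2,idxm,3,idxn,v);       /* v[i*3+j] holds entry (idxm[i],idxn[j]) */
.ve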
1763: Level: advanced
1765: Concepts: matrices^accessing values
1767: .seealso: MatGetRow(), MatGetSubMatrices(), MatSetValues()
1768: @*/
1769: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1770: {
1776: if (!m || !n) return(0);
1780: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1781: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1782: if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1783: MatCheckPreallocated(mat,1);
1785: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1786: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1787: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1788: return(0);
1789: }
1793: /*@
1794: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1795: the same size. Currently, this can only be called once and creates the given matrix.
1797: Not Collective
1799: Input Parameters:
1800: + mat - the matrix
1801: . nb - the number of blocks
1802: . bs - the number of rows (and columns) in each block
1803: . rows - a concatenation of the rows for each block
1804: - v - a concatenation of logically two-dimensional arrays of values
1806: Notes:
1807: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
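
   The following is a minimal sketch with two 3x3 blocks; the row indices and values are illustrative only and
   error checking is omitted.
.vb
      PetscInt    nb = 2,bs = 3;
      PetscInt    rows[6] = {0,1,2,3,4,5};     /* bs rows for each of the nb blocks */
      PetscScalar v[18];                       /* nb*bs*bs values, one bs-by-bs block after another */

      /* ... fill v with the two element matrices ... */
      MatSetValuesBatch(mat,nb,bs,rows,v);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve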
1809: Level: advanced
1811: Concepts: matrices^putting entries in
1813: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1814: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1815: @*/
1816: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1817: {
1825: #if defined(PETSC_USE_DEBUG)
1826: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1827: #endif
1829: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1830: if (mat->ops->setvaluesbatch) {
1831: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1832: } else {
1833: PetscInt b;
1834: for (b = 0; b < nb; ++b) {
1835: MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1836: }
1837: }
1838: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1839: return(0);
1840: }
1844: /*@
1845: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1846: the routine MatSetValuesLocal() to allow users to insert matrix entries
1847: using a local (per-processor) numbering.
1849: Not Collective
1851: Input Parameters:
1852: + x - the matrix
1853: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or ISLocalToGlobalMappingCreateIS()
1854: - cmapping - column mapping
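
   The following is a minimal sketch using the same mapping for rows and columns; nlocal and globalidx are
   placeholders for this process's local-to-global numbering.
.vb
      ISLocalToGlobalMapping map;

      ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,nlocal,globalidx,PETSC_COPY_VALUES,&map);
      MatSetLocalToGlobalMapping(x,map,map);
      ISLocalToGlobalMappingDestroy(&map);
.ve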
1856: Level: intermediate
1858: Concepts: matrices^local to global mapping
1859: Concepts: local to global mapping^for matrices
1861: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1862: @*/
1863: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1864: {
1873: if (x->ops->setlocaltoglobalmapping) {
1874: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1875: } else {
1876: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1877: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1878: }
1879: return(0);
1880: }
1885: /*@
1886: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
1888: Not Collective
1890: Input Parameters:
1891: . A - the matrix
1893: Output Parameters:
1894: + rmapping - row mapping
1895: - cmapping - column mapping
1897: Level: advanced
1899: Concepts: matrices^local to global mapping
1900: Concepts: local to global mapping^for matrices
1902: .seealso: MatSetValuesLocal()
1903: @*/
1904: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1905: {
1911: if (rmapping) *rmapping = A->rmap->mapping;
1912: if (cmapping) *cmapping = A->cmap->mapping;
1913: return(0);
1914: }
1918: /*@
1919: MatGetLayouts - Gets the PetscLayout objects for rows and columns
1921: Not Collective
1923: Input Parameters:
1924: . A - the matrix
1926: Output Parameters:
1927: + rmap - row layout
1928: - cmap - column layout
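
   The following is a minimal sketch that queries the row ownership range through the returned layout.
.vb
      PetscLayout rmap,cmap;
      PetscInt    rstart,rend;

      MatGetLayouts(A,&rmap,&cmap);
      PetscLayoutGetRange(rmap,&rstart,&rend);   /* rows rstart..rend-1 are owned by this process */
.ve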
1930: Level: advanced
1932: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping()
1933: @*/
1934: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
1935: {
1941: if (rmap) *rmap = A->rmap;
1942: if (cmap) *cmap = A->cmap;
1943: return(0);
1944: }
1948: /*@
1949: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
1950: using a local ordering of the nodes.
1952: Not Collective
1954: Input Parameters:
1955: + x - the matrix
1956: . nrow, irow - number of rows and their local indices
1957: . ncol, icol - number of columns and their local indices
1958: . y - a logically two-dimensional array of values
1959: - addv - either INSERT_VALUES or ADD_VALUES, where
1960: ADD_VALUES adds values to any existing entries, and
1961: INSERT_VALUES replaces existing entries with new values
1963: Notes:
1964: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1965: MatSetUp() before using this routine
1967: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine
1969: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
1970: options cannot be mixed without intervening calls to the assembly
1971: routines.
1973: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1974: MUST be called after all calls to MatSetValuesLocal() have been completed.
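
   The following is a minimal sketch adding one row of a 1-D stencil; i is a placeholder local node index and the
   local-to-global mapping is assumed to have been set with MatSetLocalToGlobalMapping().
.vb
      PetscInt    irow[1] = {i},icol[3] = {i-1,i,i+1};
      PetscScalar y[3]    = {-1.0,2.0,-1.0};

      MatSetValuesLocal(mat,1,irow,3,icol,y,ADD_VALUES);
      /* ... after all insertions ... */
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve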
1976: Level: intermediate
1978: Concepts: matrices^putting entries in with local numbering
1980: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
1981: MatSetValueLocal()
1982: @*/
1983: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
1984: {
1990: MatCheckPreallocated(mat,1);
1991: if (!nrow || !ncol) return(0); /* no values to insert */
1995: if (mat->insertmode == NOT_SET_VALUES) {
1996: mat->insertmode = addv;
1997: }
1998: #if defined(PETSC_USE_DEBUG)
1999: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2000: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2001: if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2002: #endif
2004: if (mat->assembled) {
2005: mat->was_assembled = PETSC_TRUE;
2006: mat->assembled = PETSC_FALSE;
2007: }
2008: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2009: if (mat->ops->setvalueslocal) {
2010: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2011: } else {
2012: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2013: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2014: irowm = buf; icolm = buf+nrow;
2015: } else {
2016: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2017: irowm = bufr; icolm = bufc;
2018: }
2019: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2020: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2021: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2022: PetscFree2(bufr,bufc);
2023: }
2024: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2025: #if defined(PETSC_HAVE_CUSP)
2026: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2027: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2028: }
2029: #endif
2030: #if defined(PETSC_HAVE_VIENNACL)
2031: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2032: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2033: }
2034: #endif
2035: return(0);
2036: }
2040: /*@
2041: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2042:    using a local ordering of the nodes, a block at a time.
2044: Not Collective
2046: Input Parameters:
2047: + x - the matrix
2048: . nrow, irow - number of rows and their local indices
2049: . ncol, icol - number of columns and their local indices
2050: . y - a logically two-dimensional array of values
2051: - addv - either INSERT_VALUES or ADD_VALUES, where
2052: ADD_VALUES adds values to any existing entries, and
2053: INSERT_VALUES replaces existing entries with new values
2055: Notes:
2056: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2057: MatSetUp() before using this routine
2059: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2060:    before using this routine.
2062: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2063: options cannot be mixed without intervening calls to the assembly
2064: routines.
2066: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2067: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
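
   The following is a minimal sketch inserting one block row consisting of two 2x2 blocks; e is a placeholder local
   block index, and the block size and local-to-global mapping are assumed to have been set already.
.vb
      PetscInt    irow[1] = {e},icol[2] = {e,e+1};    /* local block row and block column indices */
      PetscScalar y[8];                               /* 1 x 2 blocks of size 2 x 2, row oriented */

      /* ... fill y with the element contribution ... */
      MatSetValuesBlockedLocal(mat,1,irow,2,icol,y,ADD_VALUES);
.ve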
2069: Level: intermediate
2071: Concepts: matrices^putting blocked values in with local numbering
2073: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2074: MatSetValuesLocal(), MatSetValuesBlocked()
2075: @*/
2076: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2077: {
2083: MatCheckPreallocated(mat,1);
2084: if (!nrow || !ncol) return(0); /* no values to insert */
2088: if (mat->insertmode == NOT_SET_VALUES) {
2089: mat->insertmode = addv;
2090: }
2091: #if defined(PETSC_USE_DEBUG)
2092: else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2093: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2094: if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2095: #endif
2097: if (mat->assembled) {
2098: mat->was_assembled = PETSC_TRUE;
2099: mat->assembled = PETSC_FALSE;
2100: }
2101: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2102: if (mat->ops->setvaluesblockedlocal) {
2103: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2104: } else {
2105: PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2106: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2107: irowm = buf; icolm = buf + nrow;
2108: } else {
2109: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2110: irowm = bufr; icolm = bufc;
2111: }
2112: ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2113: ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2114: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2115: PetscFree2(bufr,bufc);
2116: }
2117: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2118: #if defined(PETSC_HAVE_CUSP)
2119: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2120: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2121: }
2122: #endif
2123: #if defined(PETSC_HAVE_VIENNACL)
2124: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2125: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2126: }
2127: #endif
2128: return(0);
2129: }
2133: /*@
2134:    MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal.
2136: Collective on Mat and Vec
2138: Input Parameters:
2139: + mat - the matrix
2140: - x - the vector to be multiplied
2142: Output Parameters:
2143: . y - the result
2145: Notes:
2146: The vectors x and y cannot be the same. I.e., one cannot
2147:    call MatMultDiagonalBlock(A,y,y).
2149: Level: developer
2151: Concepts: matrix-vector product
2153: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2154: @*/
2155: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2156: {
2165: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2166: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2167: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2168: MatCheckPreallocated(mat,1);
2170: if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2171: (*mat->ops->multdiagonalblock)(mat,x,y);
2172: PetscObjectStateIncrease((PetscObject)y);
2173: return(0);
2174: }
2176: /* --------------------------------------------------------*/
2179: /*@
2180: MatMult - Computes the matrix-vector product, y = Ax.
2182: Neighbor-wise Collective on Mat and Vec
2184: Input Parameters:
2185: + mat - the matrix
2186: - x - the vector to be multiplied
2188: Output Parameters:
2189: . y - the result
2191: Notes:
2192: The vectors x and y cannot be the same. I.e., one cannot
2193: call MatMult(A,y,y).
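
   The following is a minimal sketch; the vectors are created with MatCreateVecs() so that they conform to the
   layout of the matrix.
.vb
      Vec x,y;

      MatCreateVecs(A,&x,&y);      /* x conforms to the columns of A, y to the rows */
      VecSet(x,1.0);
      MatMult(A,x,y);              /* y = A x */
      VecDestroy(&x);
      VecDestroy(&y);
.ve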
2195: Level: beginner
2197: Concepts: matrix-vector product
2199: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2200: @*/
2201: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2202: {
2210: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2211: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2212: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2213: #if !defined(PETSC_HAVE_CONSTRAINTS)
2214: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2215: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2216: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2217: #endif
2218: VecLocked(y,3);
2219: if (mat->erroriffpe) {VecValidValues(x,2,PETSC_TRUE);}
2220: MatCheckPreallocated(mat,1);
2222: VecLockPush(x);
2223: if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2224: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2225: (*mat->ops->mult)(mat,x,y);
2226: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2227: if (mat->erroriffpe) {VecValidValues(y,3,PETSC_FALSE);}
2228: VecLockPop(x);
2229: return(0);
2230: }
2234: /*@
2235: MatMultTranspose - Computes matrix transpose times a vector.
2237: Neighbor-wise Collective on Mat and Vec
2239: Input Parameters:
2240: + mat - the matrix
2241: -  x - the vector to be multiplied
2243: Output Parameters:
2244: . y - the result
2246: Notes:
2247: The vectors x and y cannot be the same. I.e., one cannot
2248: call MatMultTranspose(A,y,y).
2250:    For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiply;
2251:    use MatMultHermitianTranspose() instead.
2253: Level: beginner
2255: Concepts: matrix vector product^transpose
2257: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2258: @*/
2259: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2260: {
2269: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2270: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2271: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2272: #if !defined(PETSC_HAVE_CONSTRAINTS)
2273: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2274: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2275: #endif
2276: if (mat->erroriffpe) {VecValidValues(x,2,PETSC_TRUE);}
2277: MatCheckPreallocated(mat,1);
2279:   if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2280: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2281: VecLockPush(x);
2282: (*mat->ops->multtranspose)(mat,x,y);
2283: VecLockPop(x);
2284: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2285: PetscObjectStateIncrease((PetscObject)y);
2286: if (mat->erroriffpe) {VecValidValues(y,3,PETSC_FALSE);}
2287: return(0);
2288: }
2292: /*@
2293: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2295: Neighbor-wise Collective on Mat and Vec
2297: Input Parameters:
2298: + mat - the matrix
2299: -  x - the vector to be multiplied
2301: Output Parameters:
2302: . y - the result
2304: Notes:
2305: The vectors x and y cannot be the same. I.e., one cannot
2306: call MatMultHermitianTranspose(A,y,y).
2308: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2310: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2312: Level: beginner
2314: Concepts: matrix vector product^transpose
2316: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2317: @*/
2318: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2319: {
2321: Vec w;
2329: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2330: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2331: if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2332: #if !defined(PETSC_HAVE_CONSTRAINTS)
2333: if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2334: if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2335: #endif
2336: MatCheckPreallocated(mat,1);
2338: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2339: if (mat->ops->multhermitiantranspose) {
2340: VecLockPush(x);
2341: (*mat->ops->multhermitiantranspose)(mat,x,y);
2342: VecLockPop(x);
2343: } else {
2344: VecDuplicate(x,&w);
2345: VecCopy(x,w);
2346: VecConjugate(w);
2347: MatMultTranspose(mat,w,y);
2348: VecDestroy(&w);
2349: VecConjugate(y);
2350: }
2351: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2352: PetscObjectStateIncrease((PetscObject)y);
2353: return(0);
2354: }
2358: /*@
2359: MatMultAdd - Computes v3 = v2 + A * v1.
2361: Neighbor-wise Collective on Mat and Vec
2363: Input Parameters:
2364: + mat - the matrix
2365: - v1, v2 - the vectors
2367: Output Parameters:
2368: . v3 - the result
2370: Notes:
2371: The vectors v1 and v3 cannot be the same. I.e., one cannot
2372: call MatMultAdd(A,v1,v2,v1).
2374: Level: beginner
2376: Concepts: matrix vector product^addition
2378: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2379: @*/
2380: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2381: {
2391: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2392: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2393: if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2394: /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2395: if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2396: if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2397: if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2398: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2399: MatCheckPreallocated(mat,1);
2401: if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2402: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2403: VecLockPush(v1);
2404: (*mat->ops->multadd)(mat,v1,v2,v3);
2405: VecLockPop(v1);
2406: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2407: PetscObjectStateIncrease((PetscObject)v3);
2408: return(0);
2409: }
2413: /*@
2414: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2416: Neighbor-wise Collective on Mat and Vec
2418: Input Parameters:
2419: + mat - the matrix
2420: - v1, v2 - the vectors
2422: Output Parameters:
2423: . v3 - the result
2425: Notes:
2426: The vectors v1 and v3 cannot be the same. I.e., one cannot
2427: call MatMultTransposeAdd(A,v1,v2,v1).
2429: Level: beginner
2431: Concepts: matrix vector product^transpose and addition
2433: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2434: @*/
2435: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2436: {
2446: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2447: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2448: if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2449: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2450: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2451: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2452: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2453: MatCheckPreallocated(mat,1);
2455: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2456: VecLockPush(v1);
2457: (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2458: VecLockPop(v1);
2459: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2460: PetscObjectStateIncrease((PetscObject)v3);
2461: return(0);
2462: }
2466: /*@
2467: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2469: Neighbor-wise Collective on Mat and Vec
2471: Input Parameters:
2472: + mat - the matrix
2473: - v1, v2 - the vectors
2475: Output Parameters:
2476: . v3 - the result
2478: Notes:
2479: The vectors v1 and v3 cannot be the same. I.e., one cannot
2480: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2482: Level: beginner
2484: Concepts: matrix vector product^transpose and addition
2486: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2487: @*/
2488: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2489: {
2499: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2500: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2501: if (!mat->ops->multhermitiantransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2502: if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2503: if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2504: if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2505: if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2506: MatCheckPreallocated(mat,1);
2508: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2509: VecLockPush(v1);
2510: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2511: VecLockPop(v1);
2512: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2513: PetscObjectStateIncrease((PetscObject)v3);
2514: return(0);
2515: }
2519: /*@
2520: MatMultConstrained - The inner multiplication routine for a
2521: constrained matrix P^T A P.
2523: Neighbor-wise Collective on Mat and Vec
2525: Input Parameters:
2526: + mat - the matrix
2527: -  x - the vector to be multiplied
2529: Output Parameters:
2530: . y - the result
2532: Notes:
2533: The vectors x and y cannot be the same. I.e., one cannot
2534:    call MatMultConstrained(A,y,y).
2536: Level: beginner
2538: .keywords: matrix, multiply, matrix-vector product, constraint
2539: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2540: @*/
2541: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2542: {
2549: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2550: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2551: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2552: if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2553: if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2554: if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2556: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2557: VecLockPush(x);
2558: (*mat->ops->multconstrained)(mat,x,y);
2559: VecLockPop(x);
2560: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2561: PetscObjectStateIncrease((PetscObject)y);
2562: return(0);
2563: }
2567: /*@
2568: MatMultTransposeConstrained - The inner multiplication routine for a
2569: constrained matrix P^T A^T P.
2571: Neighbor-wise Collective on Mat and Vec
2573: Input Parameters:
2574: + mat - the matrix
2575: -  x - the vector to be multiplied
2577: Output Parameters:
2578: . y - the result
2580: Notes:
2581: The vectors x and y cannot be the same. I.e., one cannot
2582:    call MatMultTransposeConstrained(A,y,y).
2584: Level: beginner
2586: .keywords: matrix, multiply, matrix-vector product, constraint
2587: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2588: @*/
2589: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2590: {
2597: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2598: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2599: if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2600:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2601:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2603: PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2604: (*mat->ops->multtransposeconstrained)(mat,x,y);
2605: PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2606: PetscObjectStateIncrease((PetscObject)y);
2607: return(0);
2608: }
2612: /*@C
2613:    MatGetFactorType - gets the type of factorization of a matrix
2615:    Not Collective
2618: Input Parameters:
2619: . mat - the matrix
2621: Output Parameters:
2622: .  t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC, MAT_FACTOR_ILUDT
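
   The following is a minimal sketch.
.vb
      MatFactorType t;

      MatGetFactorType(mat,&t);
      if (t == MAT_FACTOR_NONE) {
        /* mat is not factored, so routines such as MatSetValues() may be used */
      }
.ve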
2624: Level: intermediate
2626: .seealso: MatFactorType, MatGetFactor()
2627: @*/
2628: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2629: {
2633: *t = mat->factortype;
2634: return(0);
2635: }
2637: /* ------------------------------------------------------------*/
2640: /*@C
2641: MatGetInfo - Returns information about matrix storage (number of
2642: nonzeros, memory, etc.).
2644: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2646:    Input Parameters:
2647: +  mat - the matrix
2648: -  flag - flag indicating the type of parameters to be returned
2649:          (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2650:           MAT_GLOBAL_SUM - sum over all processors)
2652:    Output Parameter:
2653: .  info - matrix information context
2655: Notes:
2656: The MatInfo context contains a variety of matrix data, including
2657: number of nonzeros allocated and used, number of mallocs during
2658: matrix assembly, etc. Additional information for factored matrices
2659: is provided (such as the fill ratio, number of mallocs during
2660: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2661: when using the runtime options
2662: $ -info -mat_view ::ascii_info
2664: Example for C/C++ Users:
2665: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2666: data within the MatInfo context. For example,
2667: .vb
2668: MatInfo info;
2669: Mat A;
2670: double mal, nz_a, nz_u;
2672: MatGetInfo(A,MAT_LOCAL,&info);
2673: mal = info.mallocs;
2674: nz_a = info.nz_allocated;
2675: .ve
2677: Example for Fortran Users:
2678: Fortran users should declare info as a double precision
2679: array of dimension MAT_INFO_SIZE, and then extract the parameters
2680:    of interest. See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h for
2681: a complete list of parameter names.
2682: .vb
2683: double precision info(MAT_INFO_SIZE)
2684: double precision mal, nz_a
2685: Mat A
2686: integer ierr
2688: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2689: mal = info(MAT_INFO_MALLOCS)
2690: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2691: .ve
2693: Level: intermediate
2695: Concepts: matrices^getting information on
2697: Developer Note: fortran interface is not autogenerated as the f90
2698:      interface definition cannot be generated correctly [due to MatInfo]
2700: .seealso: MatStashGetInfo()
2702: @*/
2703: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2704: {
2711: if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2712: MatCheckPreallocated(mat,1);
2713: (*mat->ops->getinfo)(mat,flag,info);
2714: return(0);
2715: }
2717: /* ----------------------------------------------------------*/
2721: /*@C
2722: MatLUFactor - Performs in-place LU factorization of matrix.
2724: Collective on Mat
2726: Input Parameters:
2727: + mat - the matrix
2728: . row - row permutation
2729: . col - column permutation
2730: - info - options for factorization, includes
2731: $ fill - expected fill as ratio of original fill.
2732: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2733: $ Run with the option -info to determine an optimal value to use
2735: Notes:
2736: Most users should employ the simplified KSP interface for linear solvers
2737: instead of working directly with matrix algebra routines such as this.
2738: See, e.g., KSPCreate().
2740: This changes the state of the matrix to a factored matrix; it cannot be used
2741: for example with MatSetValues() unless one first calls MatSetUnfactored().
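
   The following is a minimal sketch of an in-place factorization and solve; the ordering type and fill value are
   illustrative, b and x are assumed to be compatible vectors, and the matrix type is assumed to support in-place
   LU (for example SEQAIJ).
.vb
      IS            row,col;
      MatFactorInfo info;

      MatFactorInfoInitialize(&info);
      info.fill = 2.0;                          /* expected fill ratio */
      MatGetOrdering(mat,MATORDERINGND,&row,&col);
      MatLUFactor(mat,row,col,&info);           /* mat now holds its LU factors */
      MatSolve(mat,b,x);
      ISDestroy(&row);
      ISDestroy(&col);
.ve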
2743: Level: developer
2745: Concepts: matrices^LU factorization
2747: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2748: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2750: Developer Note: fortran interface is not autogenerated as the f90
2751:     interface definition cannot be generated correctly [due to MatFactorInfo]
2753: @*/
2754: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2755: {
2757: MatFactorInfo tinfo;
2765: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2766: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2767: if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2768: MatCheckPreallocated(mat,1);
2769: if (!info) {
2770: MatFactorInfoInitialize(&tinfo);
2771: info = &tinfo;
2772: }
2774: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2775: (*mat->ops->lufactor)(mat,row,col,info);
2776: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2777: PetscObjectStateIncrease((PetscObject)mat);
2778: return(0);
2779: }
2783: /*@C
2784: MatILUFactor - Performs in-place ILU factorization of matrix.
2786: Collective on Mat
2788: Input Parameters:
2789: + mat - the matrix
2790: . row - row permutation
2791: . col - column permutation
2792: - info - structure containing
2793: $ levels - number of levels of fill.
2794: $ expected fill - as ratio of original fill.
2795: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2796: missing diagonal entries)
2798: Notes:
2799: Probably really in-place only when level of fill is zero, otherwise allocates
2800: new space to store factored matrix and deletes previous memory.
2802: Most users should employ the simplified KSP interface for linear solvers
2803: instead of working directly with matrix algebra routines such as this.
2804: See, e.g., KSPCreate().
2806: Level: developer
2808: Concepts: matrices^ILU factorization
2810: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2812: Developer Note: fortran interface is not autogenerated as the f90
2813:     interface definition cannot be generated correctly [due to MatFactorInfo]
2815: @*/
2816: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2817: {
2826: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2827: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2828: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2829: if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2830: MatCheckPreallocated(mat,1);
2832: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2833: (*mat->ops->ilufactor)(mat,row,col,info);
2834: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2835: PetscObjectStateIncrease((PetscObject)mat);
2836: return(0);
2837: }
2841: /*@C
2842: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2843: Call this routine before calling MatLUFactorNumeric().
2845: Collective on Mat
2847: Input Parameters:
2848: + fact - the factor matrix obtained with MatGetFactor()
2849: . mat - the matrix
2850: . row, col - row and column permutations
2851: - info - options for factorization, includes
2852: $ fill - expected fill as ratio of original fill.
2853: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2854: $ Run with the option -info to determine an optimal value to use
2857: Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
2859: Most users should employ the simplified KSP interface for linear solvers
2860: instead of working directly with matrix algebra routines such as this.
2861: See, e.g., KSPCreate().
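
   The following is a minimal sketch of the complete symbolic/numeric sequence; the solver package and ordering are
   illustrative, and b and x are assumed to be compatible vectors.
.vb
      Mat           F;
      IS            row,col;
      MatFactorInfo info;

      MatFactorInfoInitialize(&info);
      MatGetOrdering(mat,MATORDERINGNATURAL,&row,&col);
      MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
      MatLUFactorSymbolic(F,mat,row,col,&info);
      MatLUFactorNumeric(F,mat,&info);
      MatSolve(F,b,x);
      ISDestroy(&row);
      ISDestroy(&col);
      MatDestroy(&F);
.ve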
2863: Level: developer
2865: Concepts: matrices^LU symbolic factorization
2867: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()
2869: Developer Note: fortran interface is not autogenerated as the f90
2870:     interface definition cannot be generated correctly [due to MatFactorInfo]
2872: @*/
2873: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2874: {
2884: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2885: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2886: if (!(fact)->ops->lufactorsymbolic) {
2887: const MatSolverPackage spackage;
2888: MatFactorGetSolverPackage(fact,&spackage);
2889: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
2890: }
2891: MatCheckPreallocated(mat,2);
2893: PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2894: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2895: PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2896: PetscObjectStateIncrease((PetscObject)fact);
2897: return(0);
2898: }
2902: /*@C
2903: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2904: Call this routine after first calling MatLUFactorSymbolic().
2906: Collective on Mat
2908: Input Parameters:
2909: + fact - the factor matrix obtained with MatGetFactor()
2910: . mat - the matrix
2911: - info - options for factorization
2913: Notes:
2914: See MatLUFactor() for in-place factorization. See
2915: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
2917: Most users should employ the simplified KSP interface for linear solvers
2918: instead of working directly with matrix algebra routines such as this.
2919: See, e.g., KSPCreate().
2921: Level: developer
2923: Concepts: matrices^LU numeric factorization
2925: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
2927: Developer Note: fortran interface is not autogenerated as the f90
2928:     interface definition cannot be generated correctly [due to MatFactorInfo]
2930: @*/
2931: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
2932: {
2940: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2941: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
2943: if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
2944: MatCheckPreallocated(mat,2);
2945: PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
2946: (fact->ops->lufactornumeric)(fact,mat,info);
2947: PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
2948: MatViewFromOptions(fact,NULL,"-mat_factor_view");
2949: PetscObjectStateIncrease((PetscObject)fact);
2950: return(0);
2951: }
2955: /*@C
2956: MatCholeskyFactor - Performs in-place Cholesky factorization of a
2957: symmetric matrix.
2959: Collective on Mat
2961: Input Parameters:
2962: + mat - the matrix
2963: . perm - row and column permutations
2964: -  info - options for factorization, includes fill - expected fill as ratio of original fill
2966: Notes:
2967: See MatLUFactor() for the nonsymmetric case. See also
2968: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
2970: Most users should employ the simplified KSP interface for linear solvers
2971: instead of working directly with matrix algebra routines such as this.
2972: See, e.g., KSPCreate().
2974: Level: developer
2976: Concepts: matrices^Cholesky factorization
2978: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
2979: MatGetOrdering()
2981: Developer Note: fortran interface is not autogenerated as the f90
2982:     interface definition cannot be generated correctly [due to MatFactorInfo]
2984: @*/
2985: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
2986: {
2994: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
2995: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2996: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2997: if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2998: MatCheckPreallocated(mat,1);
3000: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3001: (*mat->ops->choleskyfactor)(mat,perm,info);
3002: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3003: PetscObjectStateIncrease((PetscObject)mat);
3004: return(0);
3005: }
3009: /*@C
3010: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3011: of a symmetric matrix.
3013: Collective on Mat
3015: Input Parameters:
3016: + fact - the factor matrix obtained with MatGetFactor()
3017: . mat - the matrix
3018: . perm - row and column permutations
3019: - info - options for factorization, includes
3020: $ fill - expected fill as ratio of original fill.
3021: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3022: $ Run with the option -info to determine an optimal value to use
3024: Notes:
3025: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3026: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3028: Most users should employ the simplified KSP interface for linear solvers
3029: instead of working directly with matrix algebra routines such as this.
3030: See, e.g., KSPCreate().
3032: Level: developer
3034: Concepts: matrices^Cholesky symbolic factorization
3036: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3037: MatGetOrdering()
3039: Developer Note: fortran interface is not autogenerated as the f90
3040:     interface definition cannot be generated correctly [due to MatFactorInfo]
3042: @*/
3043: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3044: {
3053: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3054: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3055: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3056: if (!(fact)->ops->choleskyfactorsymbolic) {
3057: const MatSolverPackage spackage;
3058: MatFactorGetSolverPackage(fact,&spackage);
3059: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3060: }
3061: MatCheckPreallocated(mat,2);
3063: PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3064: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3065: PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3066: PetscObjectStateIncrease((PetscObject)fact);
3067: return(0);
3068: }
3072: /*@C
3073: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3074: of a symmetric matrix. Call this routine after first calling
3075: MatCholeskyFactorSymbolic().
3077: Collective on Mat
3079: Input Parameters:
3080: + fact - the factor matrix obtained with MatGetFactor()
3081: . mat - the initial matrix
3082: -  info - options for factorization
3086: Notes:
3087: Most users should employ the simplified KSP interface for linear solvers
3088: instead of working directly with matrix algebra routines such as this.
3089: See, e.g., KSPCreate().
3091: Level: developer
3093: Concepts: matrices^Cholesky numeric factorization
3095: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3097: Developer Note: fortran interface is not autogenerated as the f90
3098:     interface definition cannot be generated correctly [due to MatFactorInfo]
3100: @*/
3101: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3102: {
3110: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3111: if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3112: if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3113: MatCheckPreallocated(mat,2);
3115: PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3116: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3117: PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3118: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3119: PetscObjectStateIncrease((PetscObject)fact);
3120: return(0);
3121: }
3123: /* ----------------------------------------------------------------*/
3126: /*@
3127: MatSolve - Solves A x = b, given a factored matrix.
3129: Neighbor-wise Collective on Mat and Vec
3131: Input Parameters:
3132: + mat - the factored matrix
3133: - b - the right-hand-side vector
3135: Output Parameter:
3136: . x - the result vector
3138: Notes:
3139: The vectors b and x cannot be the same. I.e., one cannot
3140: call MatSolve(A,x,x).
3143: Most users should employ the simplified KSP interface for linear solvers
3144: instead of working directly with matrix algebra routines such as this.
3145: See, e.g., KSPCreate().
3147: Level: developer
3149: Concepts: matrices^triangular solves
3151: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3152: @*/
3153: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3154: {
3164: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3165: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3166: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3167: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3168: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3169: if (!mat->rmap->N && !mat->cmap->N) return(0);
3170: if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3171: MatCheckPreallocated(mat,1);
3173: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3174: (*mat->ops->solve)(mat,b,x);
3175: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3176: PetscObjectStateIncrease((PetscObject)x);
3177: return(0);
3178: }
3182: PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X)
3183: {
3185: Vec b,x;
3186: PetscInt m,N,i;
3187: PetscScalar *bb,*xx;
3188: PetscBool flg;
3191: PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3192: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3193: PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3194: if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
3196: MatDenseGetArray(B,&bb);
3197: MatDenseGetArray(X,&xx);
3198: MatGetLocalSize(B,&m,NULL); /* number local rows */
3199: MatGetSize(B,NULL,&N); /* total columns in dense matrix */
3200: MatCreateVecs(A,&x,&b);
3201: for (i=0; i<N; i++) {
3202: VecPlaceArray(b,bb + i*m);
3203: VecPlaceArray(x,xx + i*m);
3204: MatSolve(A,b,x);
3205: VecResetArray(x);
3206: VecResetArray(b);
3207: }
3208: VecDestroy(&b);
3209: VecDestroy(&x);
3210: MatDenseRestoreArray(B,&bb);
3211: MatDenseRestoreArray(X,&xx);
3212: return(0);
3213: }
3217: /*@
3218: MatMatSolve - Solves A X = B, given a factored matrix.
3220: Neighbor-wise Collective on Mat
3222: Input Parameters:
3223: + A - the factored matrix
3224: - B - the right-hand-side matrix (dense matrix)
3226: Output Parameter:
3227: . X - the result matrix (dense matrix)
3229: Notes:
3230:    The matrices B and X cannot be the same.  I.e., one cannot
3231:    call MatMatSolve(A,X,X).
3234: Most users should usually employ the simplified KSP interface for linear solvers
3235: instead of working directly with matrix algebra routines such as this.
3236: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3237: at a time.
3239: When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3240: it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.
3242: Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.
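
   The following is a minimal sketch; A is assumed to be a factored matrix and nrhs is a placeholder for the number
   of right hand sides.
.vb
      Mat      B,X;
      PetscInt m,n;

      MatGetLocalSize(A,&m,NULL);               /* match the row layout of A */
      MatGetSize(A,&n,NULL);
      MatCreateDense(PetscObjectComm((PetscObject)A),m,PETSC_DECIDE,n,nrhs,NULL,&B);
      MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);
      /* ... fill B with the right hand sides and assemble it ... */
      MatMatSolve(A,B,X);                       /* column i of X solves A x = column i of B */
      MatDestroy(&B);
      MatDestroy(&X);
.ve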
3244: Level: developer
3246: Concepts: matrices^triangular solves
3248: .seealso: MatMatSolveAdd(), MatMatSolveTranspose(), MatMatSolveTransposeAdd(), MatLUFactor(), MatCholeskyFactor()
3249: @*/
3250: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3251: {
3261: if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3262: if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3263: if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3264: if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3265: if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3266: if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3267: if (!A->rmap->N && !A->cmap->N) return(0);
3268: MatCheckPreallocated(A,1);
3270: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3271: if (!A->ops->matsolve) {
3272: PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3273: MatMatSolve_Basic(A,B,X);
3274: } else {
3275: (*A->ops->matsolve)(A,B,X);
3276: }
3277: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3278: PetscObjectStateIncrease((PetscObject)X);
3279: return(0);
3280: }
3285: /*@
3286: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3287:              U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.
3289: Neighbor-wise Collective on Mat and Vec
3291: Input Parameters:
3292: + mat - the factored matrix
3293: - b - the right-hand-side vector
3295: Output Parameter:
3296: . x - the result vector
3298: Notes:
3299: MatSolve() should be used for most applications, as it performs
3300: a forward solve followed by a backward solve.
3302: The vectors b and x cannot be the same, i.e., one cannot
3303: call MatForwardSolve(A,x,x).
3305: For matrix in seqsbaij format with block size larger than 1,
3306: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3307: MatForwardSolve() solves U^T*D y = b, and
3308: MatBackwardSolve() solves U x = y.
3309: Thus they do not provide a symmetric preconditioner.
3311: Most users should employ the simplified KSP interface for linear solvers
3312: instead of working directly with matrix algebra routines such as this.
3313: See, e.g., KSPCreate().
3315: Level: developer
3317: Concepts: matrices^forward solves
3319: .seealso: MatSolve(), MatBackwardSolve()
3320: @*/
3321: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3322: {
3332: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3333: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3334: if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3335: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3336: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3337: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3338: MatCheckPreallocated(mat,1);
3339: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3340: (*mat->ops->forwardsolve)(mat,b,x);
3341: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3342: PetscObjectStateIncrease((PetscObject)x);
3343: return(0);
3344: }
3348: /*@
3349:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3350:              D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.
3352: Neighbor-wise Collective on Mat and Vec
3354: Input Parameters:
3355: + mat - the factored matrix
3356: - b - the right-hand-side vector
3358: Output Parameter:
3359: . x - the result vector
3361: Notes:
3362: MatSolve() should be used for most applications, as it performs
3363: a forward solve followed by a backward solve.
3365: The vectors b and x cannot be the same. I.e., one cannot
3366: call MatBackwardSolve(A,x,x).
3368: For matrix in seqsbaij format with block size larger than 1,
3369: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3370: MatForwardSolve() solves U^T*D y = b, and
3371: MatBackwardSolve() solves U x = y.
3372: Thus they do not provide a symmetric preconditioner.
3374: Most users should employ the simplified KSP interface for linear solvers
3375: instead of working directly with matrix algebra routines such as this.
3376: See, e.g., KSPCreate().
3378: Level: developer
3380: Concepts: matrices^backward solves
3382: .seealso: MatSolve(), MatForwardSolve()
3383: @*/
3384: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3385: {
3395: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3396: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3397: if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3398: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3399: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3400: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3401: MatCheckPreallocated(mat,1);
3403: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3404: (*mat->ops->backwardsolve)(mat,b,x);
3405: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3406: PetscObjectStateIncrease((PetscObject)x);
3407: return(0);
3408: }
3412: /*@
3413: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3415: Neighbor-wise Collective on Mat and Vec
3417: Input Parameters:
3418: + mat - the factored matrix
3419: . b - the right-hand-side vector
3420: - y - the vector to be added to
3422: Output Parameter:
3423: . x - the result vector
3425: Notes:
3426: The vectors b and x cannot be the same. I.e., one cannot
3427: call MatSolveAdd(A,x,y,x).
3429: Most users should employ the simplified KSP interface for linear solvers
3430: instead of working directly with matrix algebra routines such as this.
3431: See, e.g., KSPCreate().
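Example of Usage (a minimal sketch; assumes F is a factored matrix obtained with MatGetFactor() followed by the symbolic and numeric factorization routines, and b, y, x are compatible vectors; error checking omitted):
.vb
 MatSolveAdd(F,b,y,x);    /* x = y + inv(A) b */
.ve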
3433: Level: developer
3435: Concepts: matrices^triangular solves
3437: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3438: @*/
3439: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3440: {
3441: PetscScalar one = 1.0;
3442: Vec tmp;
3454: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3455: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3456: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3457: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3458: if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3459: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3460: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3461: MatCheckPreallocated(mat,1);
3463: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3464: if (mat->ops->solveadd) {
3465: (*mat->ops->solveadd)(mat,b,y,x);
3466: } else {
3467: /* do the solve then the add manually */
3468: if (x != y) {
3469: MatSolve(mat,b,x);
3470: VecAXPY(x,one,y);
3471: } else {
3472: VecDuplicate(x,&tmp);
3473: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3474: VecCopy(x,tmp);
3475: MatSolve(mat,b,x);
3476: VecAXPY(x,one,tmp);
3477: VecDestroy(&tmp);
3478: }
3479: }
3480: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3481: PetscObjectStateIncrease((PetscObject)x);
3482: return(0);
3483: }
3487: /*@
3488: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3490: Neighbor-wise Collective on Mat and Vec
3492: Input Parameters:
3493: + mat - the factored matrix
3494: - b - the right-hand-side vector
3496: Output Parameter:
3497: . x - the result vector
3499: Notes:
3500: The vectors b and x cannot be the same. I.e., one cannot
3501: call MatSolveTranspose(A,x,x).
3503: Most users should employ the simplified KSP interface for linear solvers
3504: instead of working directly with matrix algebra routines such as this.
3505: See, e.g., KSPCreate().
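Example of Usage (a minimal sketch; assumes F is a factored matrix and b, x are compatible vectors; error checking omitted):
.vb
 MatSolveTranspose(F,b,x);    /* solve A' x = b using the factors of A */
.ve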
3507: Level: developer
3509: Concepts: matrices^triangular solves
3511: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3512: @*/
3513: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3514: {
3524: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3525: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3526: if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3527: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3528: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3529: MatCheckPreallocated(mat,1);
3530: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3531: (*mat->ops->solvetranspose)(mat,b,x);
3532: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3533: PetscObjectStateIncrease((PetscObject)x);
3534: return(0);
3535: }
3539: /*@
3540: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3541: factored matrix.
3543: Neighbor-wise Collective on Mat and Vec
3545: Input Parameters:
3546: + mat - the factored matrix
3547: . b - the right-hand-side vector
3548: - y - the vector to be added to
3550: Output Parameter:
3551: . x - the result vector
3553: Notes:
3554: The vectors b and x cannot be the same. I.e., one cannot
3555: call MatSolveTransposeAdd(A,x,y,x).
3557: Most users should employ the simplified KSP interface for linear solvers
3558: instead of working directly with matrix algebra routines such as this.
3559: See, e.g., KSPCreate().
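Example of Usage (a minimal sketch; assumes F is a factored matrix and b, y, x are compatible vectors; error checking omitted):
.vb
 MatSolveTransposeAdd(F,b,y,x);    /* x = y + inv(A') b */
.ve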
3561: Level: developer
3563: Concepts: matrices^triangular solves
3565: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3566: @*/
3567: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3568: {
3569: PetscScalar one = 1.0;
3571: Vec tmp;
3582: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3583: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3584: if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3585: if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3586: if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3587: if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3588: MatCheckPreallocated(mat,1);
3590: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3591: if (mat->ops->solvetransposeadd) {
3592: (*mat->ops->solvetransposeadd)(mat,b,y,x);
3593: } else {
3594: /* do the solve then the add manually */
3595: if (x != y) {
3596: MatSolveTranspose(mat,b,x);
3597: VecAXPY(x,one,y);
3598: } else {
3599: VecDuplicate(x,&tmp);
3600: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3601: VecCopy(x,tmp);
3602: MatSolveTranspose(mat,b,x);
3603: VecAXPY(x,one,tmp);
3604: VecDestroy(&tmp);
3605: }
3606: }
3607: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3608: PetscObjectStateIncrease((PetscObject)x);
3609: return(0);
3610: }
3611: /* ----------------------------------------------------------------*/
3615: /*@
3616: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3618: Neighbor-wise Collective on Mat and Vec
3620: Input Parameters:
3621: + mat - the matrix
3622: . b - the right hand side
3623: . omega - the relaxation factor
3624: . flag - flag indicating the type of SOR (see below)
3625: . shift - diagonal shift
3626: . its - the number of iterations
3627: - lits - the number of local iterations
3629: Output Parameters:
3630: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3632: SOR Flags:
3633: . SOR_FORWARD_SWEEP - forward SOR
3634: . SOR_BACKWARD_SWEEP - backward SOR
3635: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3636: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3637: . SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3638: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3639: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3640: upper/lower triangular part of matrix to
3641: vector (with omega)
3642: . SOR_ZERO_INITIAL_GUESS - zero initial guess
3644: Notes:
3645: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3646: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3647: on each processor.
3649: Application programmers will not generally use MatSOR() directly,
3650: but instead will employ the KSP/PC interface.
3652: Notes: For BAIJ, SBAIJ, and AIJ matrices with Inodes, this does a block SOR smoothing; otherwise it does a pointwise smoothing
3654: Notes for Advanced Users:
3655: The flags are implemented as bitwise inclusive or operations.
3656: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3657: to specify a zero initial guess for SSOR.
3659: Most users should employ the simplified KSP interface for linear solvers
3660: instead of working directly with matrix algebra routines such as this.
3661: See, e.g., KSPCreate().
3663: Vectors x and b CANNOT be the same
3665: Developer Note: We should add block SOR support for AIJ matrices with block size set to greater than one and no inodes
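Example of Usage (a minimal sketch; assumes A is an assembled matrix and b, x are compatible vectors; error checking omitted):
.vb
 /* one SSOR sweep with omega = 1.5 and a zero initial guess */
 MatSOR(A,b,1.5,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP),0.0,1,1,x);
.ve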
3667: Level: developer
3669: Concepts: matrices^relaxation
3670: Concepts: matrices^SOR
3671: Concepts: matrices^Gauss-Seidel
3673: @*/
3674: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3675: {
3685: if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3686: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3687: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3688: if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3689: if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3690: if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3691: if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3692: if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3693: if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");
3695: MatCheckPreallocated(mat,1);
3696: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3697: ierr =(*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3698: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3699: PetscObjectStateIncrease((PetscObject)x);
3700: return(0);
3701: }
3705: /*
3706: Default matrix copy routine.
3707: */
3708: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3709: {
3710: PetscErrorCode ierr;
3711: PetscInt i,rstart = 0,rend = 0,nz;
3712: const PetscInt *cwork;
3713: const PetscScalar *vwork;
3716: if (B->assembled) {
3717: MatZeroEntries(B);
3718: }
3719: MatGetOwnershipRange(A,&rstart,&rend);
3720: for (i=rstart; i<rend; i++) {
3721: MatGetRow(A,i,&nz,&cwork,&vwork);
3722: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3723: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3724: }
3725: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3726: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3727: PetscObjectStateIncrease((PetscObject)B);
3728: return(0);
3729: }
3733: /*@
3734: MatCopy - Copies a matrix to another matrix.
3736: Collective on Mat
3738: Input Parameters:
3739: + A - the matrix
3740: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3742: Output Parameter:
3743: . B - where the copy is put
3745: Notes:
3746: If you use SAME_NONZERO_PATTERN, then the two matrices must have the
3747: same nonzero pattern; otherwise the routine will crash.
3749: MatCopy() copies the matrix entries of a matrix to another existing
3750: matrix (after first zeroing the second matrix). A related routine is
3751: MatConvert(), which first creates a new matrix and then copies the data.
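Example of Usage (a minimal sketch; assumes A is an assembled matrix and B is the destination; error checking omitted):
.vb
 MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);   /* B gets the same nonzero pattern as A */
 MatCopy(A,B,SAME_NONZERO_PATTERN);           /* copy the entries of A into B */
.ve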
3753: Level: intermediate
3755: Concepts: matrices^copying
3757: .seealso: MatConvert(), MatDuplicate()
3759: @*/
3760: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3761: {
3763: PetscInt i;
3771: MatCheckPreallocated(B,2);
3772: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3773: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3774: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3775: MatCheckPreallocated(A,1);
3777: PetscLogEventBegin(MAT_Copy,A,B,0,0);
3778: if (A->ops->copy) {
3779: (*A->ops->copy)(A,B,str);
3780: } else { /* generic conversion */
3781: MatCopy_Basic(A,B,str);
3782: }
3784: B->stencil.dim = A->stencil.dim;
3785: B->stencil.noc = A->stencil.noc;
3786: for (i=0; i<=A->stencil.dim; i++) {
3787: B->stencil.dims[i] = A->stencil.dims[i];
3788: B->stencil.starts[i] = A->stencil.starts[i];
3789: }
3791: PetscLogEventEnd(MAT_Copy,A,B,0,0);
3792: PetscObjectStateIncrease((PetscObject)B);
3793: return(0);
3794: }
3798: /*@C
3799: MatConvert - Converts a matrix to another matrix, either of the same
3800: or different type.
3802: Collective on Mat
3804: Input Parameters:
3805: + mat - the matrix
3806: . newtype - new matrix type. Use MATSAME to create a new matrix of the
3807: same type as the original matrix.
3808: - reuse - denotes if the destination matrix is to be created or reused. Currently
3809: MAT_REUSE_MATRIX is only supported for inplace conversion, otherwise use
3810: MAT_INITIAL_MATRIX.
3812: Output Parameter:
3813: . M - pointer to place new matrix
3815: Notes:
3816: MatConvert() first creates a new matrix and then copies the data from
3817: the first matrix. A related routine is MatCopy(), which copies the matrix
3818: entries of one matrix to another already existing matrix context.
3820: Cannot be used to convert a sequential matrix to a parallel one, or a parallel matrix
3821: to a sequential one; the MPI communicator of the generated matrix is always the same
3822: as the communicator of the input matrix.
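Example of Usage (a minimal sketch; assumes A is an assembled AIJ matrix; error checking omitted):
.vb
 MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&B);   /* B is a newly created dense copy of A */
 /* ... use B ... */
 MatDestroy(&B);
.ve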
3824: Level: intermediate
3826: Concepts: matrices^converting between storage formats
3828: .seealso: MatCopy(), MatDuplicate()
3829: @*/
3830: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
3831: {
3833: PetscBool sametype,issame,flg;
3834: char convname[256],mtype[256];
3835: Mat B;
3841: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3842: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3843: MatCheckPreallocated(mat,1);
3844: MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);
3846: PetscOptionsGetString(((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
3847: if (flg) {
3848: newtype = mtype;
3849: }
3850: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
3851: PetscStrcmp(newtype,"same",&issame);
3852: if ((reuse == MAT_REUSE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_REUSE_MATRIX only supported for in-place conversion currently");
3854: if ((reuse == MAT_REUSE_MATRIX) && (issame || sametype)) return(0);
3856: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
3857: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
3858: } else {
3859: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
3860: const char *prefix[3] = {"seq","mpi",""};
3861: PetscInt i;
3862: /*
3863: Order of precedence:
3864: 1) See if a specialized converter is known to the current matrix.
3865: 2) See if a specialized converter is known to the desired matrix class.
3866: 3) See if a good general converter is registered for the desired class
3867: (as of 6/27/03 only MATMPIADJ falls into this category).
3868: 4) See if a good general converter is known for the current matrix.
3869: 5) Use a really basic converter.
3870: */
3872: /* 1) See if a specialized converter is known to the current matrix and the desired class */
3873: for (i=0; i<3; i++) {
3874: PetscStrcpy(convname,"MatConvert_");
3875: PetscStrcat(convname,((PetscObject)mat)->type_name);
3876: PetscStrcat(convname,"_");
3877: PetscStrcat(convname,prefix[i]);
3878: PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
3879: PetscStrcat(convname,"_C");
3880: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
3881: if (conv) goto foundconv;
3882: }
3884: /* 2) See if a specialized converter is known to the desired matrix class. */
3885: MatCreate(PetscObjectComm((PetscObject)mat),&B);
3886: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
3887: MatSetType(B,newtype);
3888: for (i=0; i<3; i++) {
3889: PetscStrcpy(convname,"MatConvert_");
3890: PetscStrcat(convname,((PetscObject)mat)->type_name);
3891: PetscStrcat(convname,"_");
3892: PetscStrcat(convname,prefix[i]);
3893: PetscStrcat(convname,newtype);
3894: PetscStrcat(convname,"_C");
3895: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
3896: if (conv) {
3897: MatDestroy(&B);
3898: goto foundconv;
3899: }
3900: }
3902: /* 3) See if a good general converter is registered for the desired class */
3903: conv = B->ops->convertfrom;
3904: MatDestroy(&B);
3905: if (conv) goto foundconv;
3907: /* 4) See if a good general converter is known for the current matrix */
3908: if (mat->ops->convert) {
3909: conv = mat->ops->convert;
3910: }
3911: if (conv) goto foundconv;
3913: /* 5) Use a really basic converter. */
3914: conv = MatConvert_Basic;
3916: foundconv:
3917: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
3918: (*conv)(mat,newtype,reuse,M);
3919: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
3920: }
3921: PetscObjectStateIncrease((PetscObject)*M);
3923: /* Copy Mat options */
3924: if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
3925: if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
3926: return(0);
3927: }
3931: /*@C
3932: MatFactorGetSolverPackage - Returns name of the package providing the factorization routines
3934: Not Collective
3936: Input Parameter:
3937: . mat - the matrix, must be a factored matrix
3939: Output Parameter:
3940: . type - the string name of the package (do not free this string)
3942: Notes:
3943: In Fortran you pass in an empty string and the package name will be copied into it.
3944: (Make sure the string is long enough)
3946: Level: intermediate
3948: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
3949: @*/
3950: PetscErrorCode MatFactorGetSolverPackage(Mat mat, const MatSolverPackage *type)
3951: {
3952: PetscErrorCode ierr, (*conv)(Mat,const MatSolverPackage*);
3957: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
3958: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverPackage_C",&conv);
3959: if (!conv) {
3960: *type = MATSOLVERPETSC;
3961: } else {
3962: (*conv)(mat,type);
3963: }
3964: return(0);
3965: }
3967: typedef struct _MatSolverPackageForSpecifcType* MatSolverPackageForSpecifcType;
3968: struct _MatSolverPackageForSpecifcType {
3969: MatType mtype;
3970: PetscErrorCode (*getfactor[4])(Mat,MatFactorType,Mat*);
3971: MatSolverPackageForSpecifcType next;
3972: };
3974: typedef struct _MatSolverPackageHolder* MatSolverPackageHolder;
3975: struct _MatSolverPackageHolder {
3976: char *name;
3977: MatSolverPackageForSpecifcType handlers;
3978: MatSolverPackageHolder next;
3979: };
3981: static MatSolverPackageHolder MatSolverPackageHolders = NULL;
3985: /*@C
3986: MatSolverPackageRegister - Registers a MatSolverPackage that works for a particular matrix type
3988: Input Parameters:
3989: + package - name of the package, for example petsc or superlu
3990: . mtype - the matrix type that works with this package
3991: . ftype - the type of factorization supported by the package
3992: - getfactor - routine that will create the factored matrix ready to be used
3994: Level: intermediate
3996: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
3997: @*/
3998: PetscErrorCode MatSolverPackageRegister(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
3999: {
4000: PetscErrorCode ierr;
4001: MatSolverPackageHolder next = MatSolverPackageHolders,prev;
4002: PetscBool flg;
4003: MatSolverPackageForSpecifcType inext,iprev = NULL;
4006: if (!MatSolverPackageHolders) {
4007: PetscNew(&MatSolverPackageHolders);
4008: PetscStrallocpy(package,&MatSolverPackageHolders->name);
4009: PetscNew(&MatSolverPackageHolders->handlers);
4010: PetscStrallocpy(mtype,(char **)&MatSolverPackageHolders->handlers->mtype);
4011: MatSolverPackageHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4012: return(0);
4013: }
4014: while (next) {
4015: PetscStrcasecmp(package,next->name,&flg);
4016: if (flg) {
4017: inext = next->handlers;
4018: while (inext) {
4019: PetscStrcasecmp(mtype,inext->mtype,&flg);
4020: if (flg) {
4021: inext->getfactor[(int)ftype-1] = getfactor;
4022: return(0);
4023: }
4024: iprev = inext;
4025: inext = inext->next;
4026: }
4027: PetscNew(&iprev->next);
4028: PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4029: iprev->next->getfactor[(int)ftype-1] = getfactor;
4030: return(0);
4031: }
4032: prev = next;
4033: next = next->next;
4034: }
4035: PetscNew(&prev->next);
4036: PetscStrallocpy(package,&prev->next->name);
4037: PetscNew(&prev->next->handlers);
4038: PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4039: prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4040: return(0);
4041: }
4045: /*@C
4046: MatSolverPackageGet - Gets the function that creates the factor matrix, if it exists
4048: Input Parameters:
4049: + package - name of the package, for example petsc or superlu
4050: . mtype - the matrix type that works with this package
4051: - ftype - the type of factorization supported by the package
4053: Output Parameters:
4054: + foundpackage - PETSC_TRUE if the package was registered
4055: . foundmtype - PETSC_TRUE if the package supports the requested mtype
4056: - getfactor - routine that will create the factored matrix ready to be used or NULL if not found
4058: Level: intermediate
4060: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4061: @*/
4062: PetscErrorCode MatSolverPackageGet(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4063: {
4064: PetscErrorCode ierr;
4065: MatSolverPackageHolder next = MatSolverPackageHolders;
4066: PetscBool flg;
4067: MatSolverPackageForSpecifcType inext;
4070: if (foundpackage) *foundpackage = PETSC_FALSE;
4071: if (foundmtype) *foundmtype = PETSC_FALSE;
4072: if (getfactor) *getfactor = NULL;
4073: while (next) {
4074: PetscStrcasecmp(package,next->name,&flg);
4075: if (flg) {
4076: if (foundpackage) *foundpackage = PETSC_TRUE;
4077: inext = next->handlers;
4078: while (inext) {
4079: PetscStrcasecmp(mtype,inext->mtype,&flg);
4080: if (flg) {
4081: if (foundmtype) *foundmtype = PETSC_TRUE;
4082: if (getfactor) *getfactor = inext->getfactor[(int)ftype-1];
4083: return(0);
4084: }
4085: inext = inext->next;
4086: }
4087: }
4088: next = next->next;
4089: }
4090: return(0);
4091: }
4095: PetscErrorCode MatSolverPackageDestroy(void)
4096: {
4097: PetscErrorCode ierr;
4098: MatSolverPackageHolder next = MatSolverPackageHolders,prev;
4099: MatSolverPackageForSpecifcType inext,iprev;
4102: while (next) {
4103: PetscFree(next->name);
4104: inext = next->handlers;
4105: while (inext) {
4106: PetscFree(inext->mtype);
4107: iprev = inext;
4108: inext = inext->next;
4109: PetscFree(iprev);
4110: }
4111: prev = next;
4112: next = next->next;
4113: PetscFree(prev);
4114: }
4115: MatSolverPackageHolders = NULL;
4116: return(0);
4117: }
4121: /*@C
4122: MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()
4124: Collective on Mat
4126: Input Parameters:
4127: + mat - the matrix
4128: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4129: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4131: Output Parameters:
4132: . f - the factor matrix used with MatXXFactorSymbolic() calls
4134: Notes:
4135: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4136: such as pastix, superlu, mumps etc.
4138: PETSc must have been ./configure'd to use the external solver, using the option --download-package
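Example of Usage (a minimal sketch of the factor-and-solve sequence; assumes A is an assembled AIJ matrix and b, x, isrow, iscol, info, F are declared compatibly; error checking omitted):
.vb
 MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
 MatGetOrdering(A,MATORDERINGND,&isrow,&iscol);
 MatFactorInfoInitialize(&info);
 MatLUFactorSymbolic(F,A,isrow,iscol,&info);
 MatLUFactorNumeric(F,A,&info);
 MatSolve(F,b,x);
 MatDestroy(&F);
.ve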
4140: Level: intermediate
4142: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4143: @*/
4144: PetscErrorCode MatGetFactor(Mat mat, const MatSolverPackage type,MatFactorType ftype,Mat *f)
4145: {
4146: PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4147: PetscBool foundpackage,foundmtype;
4153: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4154: MatCheckPreallocated(mat,1);
4156: MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4157: if (!foundpackage) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4158: if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4159: if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support factorization type %s for matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);
4161: (*conv)(mat,ftype,f);
4162: return(0);
4163: }
4167: /*@C
4168: MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular solver package and factor type
4170: Not Collective
4172: Input Parameters:
4173: + mat - the matrix
4174: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4175: - ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4177: Output Parameter:
4178: . flg - PETSC_TRUE if the factorization is available
4180: Notes:
4181: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4182: such as pastix, superlu, mumps etc.
4184: PETSc must have been ./configure'd to use the external solver, using the option --download-package
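Example of Usage (a minimal sketch; assumes A is an assembled AIJ matrix and flg, F are declared; error checking omitted):
.vb
 MatGetFactorAvailable(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&flg);
 if (flg) {
   MatGetFactor(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&F);
 }
.ve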
4186: Level: intermediate
4188: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4189: @*/
4190: PetscErrorCode MatGetFactorAvailable(Mat mat, const MatSolverPackage type,MatFactorType ftype,PetscBool *flg)
4191: {
4192: PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);
4198: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4199: MatCheckPreallocated(mat,1);
4201: *flg = PETSC_FALSE;
4202: MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4203: if (gconv) {
4204: *flg = PETSC_TRUE;
4205: }
4206: return(0);
4207: }
4209: #include <petscdmtypes.h>
4213: /*@
4214: MatDuplicate - Duplicates a matrix including the non-zero structure.
4216: Collective on Mat
4218: Input Parameters:
4219: + mat - the matrix
4220: - op - either MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES (to copy the numerical values of the matrix),
4221: or MAT_SHARE_NONZERO_PATTERN (to share the nonzero pattern with the previous matrix and not copy it)
4223: Output Parameter:
4224: . M - pointer to place new matrix
4226: Level: intermediate
4228: Concepts: matrices^duplicating
4230: Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
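Example of Usage (a minimal sketch; assumes A is an assembled matrix; error checking omitted):
.vb
 MatDuplicate(A,MAT_COPY_VALUES,&B);   /* B has the same nonzero pattern and values as A */
 MatScale(B,2.0);                      /* modifying B does not change A */
 MatDestroy(&B);
.ve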
4232: .seealso: MatCopy(), MatConvert()
4233: @*/
4234: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4235: {
4237: Mat B;
4238: PetscInt i;
4239: DM dm;
4245: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4246: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4247: MatCheckPreallocated(mat,1);
4249: *M = 0;
4250: if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4251: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4252: (*mat->ops->duplicate)(mat,op,M);
4253: B = *M;
4255: B->stencil.dim = mat->stencil.dim;
4256: B->stencil.noc = mat->stencil.noc;
4257: for (i=0; i<=mat->stencil.dim; i++) {
4258: B->stencil.dims[i] = mat->stencil.dims[i];
4259: B->stencil.starts[i] = mat->stencil.starts[i];
4260: }
4262: B->nooffproczerorows = mat->nooffproczerorows;
4263: B->nooffprocentries = mat->nooffprocentries;
4265: PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4266: if (dm) {
4267: PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4268: }
4269: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4270: PetscObjectStateIncrease((PetscObject)B);
4271: return(0);
4272: }
4276: /*@
4277: MatGetDiagonal - Gets the diagonal of a matrix.
4279: Logically Collective on Mat and Vec
4281: Input Parameters:
4282: + mat - the matrix
4283: - v - the vector for storing the diagonal
4285: Output Parameter:
4286: . v - the diagonal of the matrix
4288: Level: intermediate
4290: Note:
4291: Currently only correct in parallel for square matrices.
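Example of Usage (a minimal sketch; assumes A is an assembled square matrix; error checking omitted):
.vb
 MatCreateVecs(A,NULL,&d);   /* d is compatible with the row layout of A */
 MatGetDiagonal(A,d);
 VecDestroy(&d);
.ve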
4293: Concepts: matrices^accessing diagonals
4295: .seealso: MatGetRow(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs()
4296: @*/
4297: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4298: {
4305: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4306: if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4307: MatCheckPreallocated(mat,1);
4309: (*mat->ops->getdiagonal)(mat,v);
4310: PetscObjectStateIncrease((PetscObject)v);
4311: return(0);
4312: }
4316: /*@C
4317: MatGetRowMin - Gets the minimum value (of the real part) of each
4318: row of the matrix
4320: Logically Collective on Mat and Vec
4322: Input Parameters:
4323: . mat - the matrix
4325: Output Parameter:
4326: + v - the vector for storing the minimums
4327: - idx - the indices of the column found for each row (optional)
4329: Level: intermediate
4331: Notes: The results of this call are the same as if one converted the matrix to dense format
4332: and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).
4334: This code is only implemented for a couple of matrix formats.
4336: Concepts: matrices^getting row minimums
4338: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(),
4339: MatGetRowMax()
4340: @*/
4341: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4342: {
4349: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4350: if (!mat->ops->getrowmin) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4351: MatCheckPreallocated(mat,1);
4353: (*mat->ops->getrowmin)(mat,v,idx);
4354: PetscObjectStateIncrease((PetscObject)v);
4355: return(0);
4356: }
4360: /*@C
4361: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4362: row of the matrix
4364: Logically Collective on Mat and Vec
4366: Input Parameters:
4367: . mat - the matrix
4369: Output Parameter:
4370: + v - the vector for storing the minimums
4371: - idx - the indices of the column found for each row (or NULL if not needed)
4373: Level: intermediate
4375: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4376: row is 0 (the first column).
4378: This code is only implemented for a couple of matrix formats.
4380: Concepts: matrices^getting row minimums
4382: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4383: @*/
4384: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4385: {
4392: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4393: if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4394: MatCheckPreallocated(mat,1);
4395: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4397: (*mat->ops->getrowminabs)(mat,v,idx);
4398: PetscObjectStateIncrease((PetscObject)v);
4399: return(0);
4400: }
4404: /*@C
4405: MatGetRowMax - Gets the maximum value (of the real part) of each
4406: row of the matrix
4408: Logically Collective on Mat and Vec
4410: Input Parameters:
4411: . mat - the matrix
4413: Output Parameter:
4414: + v - the vector for storing the maximums
4415: - idx - the indices of the column found for each row (optional)
4417: Level: intermediate
4419: Notes: The results of this call are the same as if one converted the matrix to dense format
4420: and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).
4422: This code is only implemented for a couple of matrix formats.
4424: Concepts: matrices^getting row maximums
4426: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4427: @*/
4428: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4429: {
4436: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4437: if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4438: MatCheckPreallocated(mat,1);
4440: (*mat->ops->getrowmax)(mat,v,idx);
4441: PetscObjectStateIncrease((PetscObject)v);
4442: return(0);
4443: }
4447: /*@C
4448: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4449: row of the matrix
4451: Logically Collective on Mat and Vec
4453: Input Parameters:
4454: . mat - the matrix
4456: Output Parameter:
4457: + v - the vector for storing the maximums
4458: - idx - the indices of the column found for each row (or NULL if not needed)
4460: Level: intermediate
4462: Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4463: row is 0 (the first column).
4465: This code is only implemented for a couple of matrix formats.
4467: Concepts: matrices^getting row maximums
4469: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4470: @*/
4471: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4472: {
4479: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4480: if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4481: MatCheckPreallocated(mat,1);
4482: if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}
4484: (*mat->ops->getrowmaxabs)(mat,v,idx);
4485: PetscObjectStateIncrease((PetscObject)v);
4486: return(0);
4487: }
4491: /*@
4492: MatGetRowSum - Gets the sum of each row of the matrix
4494: Logically Collective on Mat and Vec
4496: Input Parameters:
4497: . mat - the matrix
4499: Output Parameter:
4500: . v - the vector for storing the sum of rows
4502: Level: intermediate
4504: Notes: This code is slow since it is not currently specialized for different formats
4506: Concepts: matrices^getting row sums
4508: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4509: @*/
4510: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4511: {
4512: PetscInt start = 0, end = 0, row;
4513: PetscScalar *array;
4520: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4521: MatCheckPreallocated(mat,1);
4522: MatGetOwnershipRange(mat, &start, &end);
4523: VecGetArray(v, &array);
4524: for (row = start; row < end; ++row) {
4525: PetscInt ncols, col;
4526: const PetscInt *cols;
4527: const PetscScalar *vals;
4529: array[row - start] = 0.0;
4531: MatGetRow(mat, row, &ncols, &cols, &vals);
4532: for (col = 0; col < ncols; col++) {
4533: array[row - start] += vals[col];
4534: }
4535: MatRestoreRow(mat, row, &ncols, &cols, &vals);
4536: }
4537: VecRestoreArray(v, &array);
4538: PetscObjectStateIncrease((PetscObject) v);
4539: return(0);
4540: }
4544: /*@
4545: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4547: Collective on Mat
4549: Input Parameters:
4550: + mat - the matrix to transpose
4551: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4553: Output Parameters:
4554: . B - the transpose
4556: Notes:
4557: If you pass in &mat for B the transpose will be done in place, for example MatTranspose(mat,MAT_REUSE_MATRIX,&mat);
4559: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
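Example of Usage (a minimal sketch; assumes A is an assembled matrix; error checking omitted):
.vb
 MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* At = A^T stored explicitly */
 MatDestroy(&At);
.ve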
4561: Level: intermediate
4563: Concepts: matrices^transposing
4565: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4566: @*/
4567: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4568: {
4574: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4575: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4576: if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4577: MatCheckPreallocated(mat,1);
4579: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4580: (*mat->ops->transpose)(mat,reuse,B);
4581: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4582: if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4583: return(0);
4584: }
4588: /*@
4589: MatIsTranspose - Test whether a matrix is another one's transpose,
4590: or its own, in which case it tests symmetry.
4592: Collective on Mat
4594: Input Parameters:
4595: + A - the matrix to test
4596: . B - the matrix to test against, this can equal the first parameter
- tol - tolerance, differences between entries smaller than this are counted as zero
4598: Output Parameters:
4599: . flg - the result
4601: Notes:
4602: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4603: has a running time of the order of the number of nonzeros; the parallel
4604: test involves parallel copies of the block-offdiagonal parts of the matrix.
4606: Level: intermediate
4608: Concepts: matrices^transposing, matrix^symmetry
4610: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4611: @*/
4612: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4613: {
4614: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4620: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4621: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4622: *flg = PETSC_FALSE;
4623: if (f && g) {
4624: if (f == g) {
4625: (*f)(A,B,tol,flg);
4626: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4627: } else {
4628: MatType mattype;
4629: if (!f) {
4630: MatGetType(A,&mattype);
4631: } else {
4632: MatGetType(B,&mattype);
4633: }
4634: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4635: }
4636: return(0);
4637: }
4641: /*@
4642: MatHermitianTranspose - Computes the in-place or out-of-place Hermitian (complex conjugate) transpose of a matrix.
4644: Collective on Mat
4646: Input Parameters:
4647: + mat - the matrix to transpose and complex conjugate
4648: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (store the transpose matrix in the provided B)
4650: Output Parameters:
4651: . B - the Hermitian transpose
4653: Notes:
4654: If you pass in &mat for B the Hermitian transpose will be done in place
4656: Level: intermediate
4658: Concepts: matrices^transposing, complex conjugate
4660: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4661: @*/
4662: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4663: {
4667: MatTranspose(mat,reuse,B);
4668: #if defined(PETSC_USE_COMPLEX)
4669: MatConjugate(*B);
4670: #endif
4671: return(0);
4672: }
4676: /*@
4677: MatIsHermitianTranspose - Tests whether a matrix is another one's Hermitian transpose.
4679: Collective on Mat
4681: Input Parameters:
4682: + A - the matrix to test
4683: . B - the matrix to test against, this can equal the first parameter
- tol - tolerance, differences between entries smaller than this are counted as zero
4685: Output Parameters:
4686: . flg - the result
4688: Notes:
4689: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4690: has a running time of the order of the number of nonzeros; the parallel
4691: test involves parallel copies of the block-offdiagonal parts of the matrix.
4693: Level: intermediate
4695: Concepts: matrices^transposing, matrix^symmetry
4697: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4698: @*/
4699: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4700: {
4701: PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4707: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4708: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4709: if (f && g) {
4710: if (f==g) {
4711: (*f)(A,B,tol,flg);
4712: } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4713: }
4714: return(0);
4715: }
4719: /*@
4720: MatPermute - Creates a new matrix with rows and columns permuted from the
4721: original.
4723: Collective on Mat
4725: Input Parameters:
4726: + mat - the matrix to permute
4727: . row - row permutation, each processor supplies only the permutation for its rows
4728: - col - column permutation, each processor supplies only the permutation for its columns
4730: Output Parameters:
4731: . B - the permuted matrix
4733: Level: advanced
4735: Note:
4736: The index sets map from row/col of permuted matrix to row/col of original matrix.
4737: The index sets should be on the same communicator as Mat and have the same local sizes.
4739: Concepts: matrices^permuting
4741: .seealso: MatGetOrdering(), ISAllGather()
4743: @*/
4744: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4745: {
4754: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4755: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4756: if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4757: MatCheckPreallocated(mat,1);
4759: (*mat->ops->permute)(mat,row,col,B);
4760: PetscObjectStateIncrease((PetscObject)*B);
4761: return(0);
4762: }
4766: /*@
4767: MatEqual - Compares two matrices.
4769: Collective on Mat
4771: Input Parameters:
4772: + A - the first matrix
4773: - B - the second matrix
4775: Output Parameter:
4776: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
4778: Level: intermediate
4780: Concepts: matrices^equality between
4781: @*/
4782: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
4783: {
4793: MatCheckPreallocated(B,2);
4794: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4795: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4796: if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4797: if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4798: if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4799: if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4800: MatCheckPreallocated(A,1);
4802: (*A->ops->equal)(A,B,flg);
4803: return(0);
4804: }
4808: /*@
4809: MatDiagonalScale - Scales a matrix on the left and right by diagonal
4810: matrices that are stored as vectors. Either of the two scaling
4811: matrices can be NULL.
4813: Collective on Mat
4815: Input Parameters:
4816: + mat - the matrix to be scaled
4817: . l - the left scaling vector (or NULL)
4818: - r - the right scaling vector (or NULL)
4820: Notes:
4821: MatDiagonalScale() computes A = LAR, where
4822: L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
4823: L scales the rows of the matrix and R scales the columns of the matrix.
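Example of Usage (a minimal sketch; assumes A is an assembled matrix and l, r are vectors compatible with its rows and columns; error checking omitted):
.vb
 MatCreateVecs(A,&r,&l);
 VecSet(l,2.0);
 VecSet(r,0.5);
 MatDiagonalScale(A,l,r);   /* A <- diag(l) A diag(r) */
 VecDestroy(&l);
 VecDestroy(&r);
.ve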
4825: Level: intermediate
4827: Concepts: matrices^diagonal scaling
4828: Concepts: diagonal scaling of matrices
4830: .seealso: MatScale()
4831: @*/
4832: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4833: {
4839: if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4842: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4843: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4844: MatCheckPreallocated(mat,1);
4846: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4847: (*mat->ops->diagonalscale)(mat,l,r);
4848: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4849: PetscObjectStateIncrease((PetscObject)mat);
4850: #if defined(PETSC_HAVE_CUSP)
4851: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4852: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4853: }
4854: #endif
4855: #if defined(PETSC_HAVE_VIENNACL)
4856: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4857: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4858: }
4859: #endif
4860: return(0);
4861: }
4865: /*@
4866: MatScale - Scales all elements of a matrix by a given number.
4868: Logically Collective on Mat
4870: Input Parameters:
4871: + mat - the matrix to be scaled
4872: - a - the scaling value
4874: Output Parameter:
4875: . mat - the scaled matrix
4877: Level: intermediate
4879: Concepts: matrices^scaling all entries
4881: .seealso: MatDiagonalScale()
4882: @*/
4883: PetscErrorCode MatScale(Mat mat,PetscScalar a)
4884: {
4890: if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4891: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4892: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4894: MatCheckPreallocated(mat,1);
4896: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4897: if (a != (PetscScalar)1.0) {
4898: (*mat->ops->scale)(mat,a);
4899: PetscObjectStateIncrease((PetscObject)mat);
4900: }
4901: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4902: #if defined(PETSC_HAVE_CUSP)
4903: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4904: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4905: }
4906: #endif
4907: #if defined(PETSC_HAVE_VIENNACL)
4908: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4909: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4910: }
4911: #endif
4912: return(0);
4913: }
4917: /*@
4918: MatNorm - Calculates various norms of a matrix.
4920: Collective on Mat
4922: Input Parameters:
4923: + mat - the matrix
4924: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
4926: Output Parameters:
4927: . nrm - the resulting norm
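Example of Usage (a minimal sketch; assumes A is an assembled matrix and nrm is a PetscReal; error checking omitted):
.vb
 MatNorm(A,NORM_FROBENIUS,&nrm);
.ve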
4929: Level: intermediate
4931: Concepts: matrices^norm
4932: Concepts: norm^of matrix
4933: @*/
4934: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
4935: {
4943: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4944: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4945: if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4946: MatCheckPreallocated(mat,1);
4948: (*mat->ops->norm)(mat,type,nrm);
4949: return(0);
4950: }
4952: /*
4953: This variable is used to prevent counting of MatAssemblyBegin() that
4954: are called from within a MatAssemblyEnd().
4955: */
4956: static PetscInt MatAssemblyEnd_InUse = 0;
4959: /*@
4960: MatAssemblyBegin - Begins assembling the matrix. This routine should
4961: be called after completing all calls to MatSetValues().
4963: Collective on Mat
4965: Input Parameters:
4966: + mat - the matrix
4967: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
4969: Notes:
4970: MatSetValues() generally caches the values. The matrix is ready to
4971: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
4972: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
4973: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
4974: using the matrix.
4976: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
4977: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES, that is
4978: a global collective operation requiring all processes that share the matrix.
4980: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
4981: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
4982: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
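Example of Usage (a minimal sketch; assumes A has been created and preallocated, and i, v, rstart, rend are declared with rstart, rend from MatGetOwnershipRange(); error checking omitted):
.vb
 for (i=rstart; i<rend; i++) {
   v = 1.0;
   MatSetValues(A,1,&i,1,&i,&v,INSERT_VALUES);
 }
 MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
 MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve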
4984: Level: beginner
4986: Concepts: matrices^assembling
4988: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
4989: @*/
4990: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
4991: {
4997: MatCheckPreallocated(mat,1);
4998: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
4999: if (mat->assembled) {
5000: mat->was_assembled = PETSC_TRUE;
5001: mat->assembled = PETSC_FALSE;
5002: }
5003: if (!MatAssemblyEnd_InUse) {
5004: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5005: if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5006: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5007: } else if (mat->ops->assemblybegin) {
5008: (*mat->ops->assemblybegin)(mat,type);
5009: }
5010: return(0);
5011: }
5015: /*@
5016: MatAssembled - Indicates if a matrix has been assembled and is ready for
5017: use; for example, in matrix-vector product.
5019: Not Collective
5021: Input Parameter:
5022: . mat - the matrix
5024: Output Parameter:
5025: . assembled - PETSC_TRUE or PETSC_FALSE
5027: Level: advanced
5029: Concepts: matrices^assembled?
5031: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5032: @*/
5033: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
5034: {
5039: *assembled = mat->assembled;
5040: return(0);
5041: }
5045: /*@
5046: MatAssemblyEnd - Completes assembling the matrix. This routine should
5047: be called after MatAssemblyBegin().
5049: Collective on Mat
5051: Input Parameters:
5052: + mat - the matrix
5053: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5055: Options Database Keys:
5056: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5057: . -mat_view ::ascii_info_detail - Prints more detailed info
5058: . -mat_view - Prints matrix in ASCII format
5059: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
5060: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5061: . -display <name> - Sets display name (default is host)
5062: . -draw_pause <sec> - Sets number of seconds to pause after display
5063: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 11 Using MATLAB with PETSc )
5064: . -viewer_socket_machine <machine> - Machine to use for socket
5065: . -viewer_socket_port <port> - Port number to use for socket
5066: - -mat_view binary:filename[:append] - Save matrix to file in binary format
5068: Notes:
5069: MatSetValues() generally caches the values. The matrix is ready to
5070: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5071: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5072: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5073: using the matrix.
5075: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5076: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5077: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
5079: Level: beginner
5081: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5082: @*/
5083: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5084: {
5085: PetscErrorCode ierr;
5086: static PetscInt inassm = 0;
5087: PetscBool flg = PETSC_FALSE;
5093: inassm++;
5094: MatAssemblyEnd_InUse++;
5095: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5096: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5097: if (mat->ops->assemblyend) {
5098: (*mat->ops->assemblyend)(mat,type);
5099: }
5100: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5101: } else if (mat->ops->assemblyend) {
5102: (*mat->ops->assemblyend)(mat,type);
5103: }
5105: /* Flush assembly is not a true assembly */
5106: if (type != MAT_FLUSH_ASSEMBLY) {
5107: mat->assembled = PETSC_TRUE; mat->num_ass++;
5108: }
5109: mat->insertmode = NOT_SET_VALUES;
5110: MatAssemblyEnd_InUse--;
5111: PetscObjectStateIncrease((PetscObject)mat);
5112: if (!mat->symmetric_eternal) {
5113: mat->symmetric_set = PETSC_FALSE;
5114: mat->hermitian_set = PETSC_FALSE;
5115: mat->structurally_symmetric_set = PETSC_FALSE;
5116: }
5117: #if defined(PETSC_HAVE_CUSP)
5118: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5119: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5120: }
5121: #endif
5122: #if defined(PETSC_HAVE_VIENNACL)
5123: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5124: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5125: }
5126: #endif
5127: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5128: MatViewFromOptions(mat,NULL,"-mat_view");
5130: if (mat->checksymmetryonassembly) {
5131: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5132: if (flg) {
5133: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5134: } else {
5135: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5136: }
5137: }
5138: if (mat->nullsp && mat->checknullspaceonassembly) {
5139: MatNullSpaceTest(mat->nullsp,mat,NULL);
5140: }
5141: }
5142: inassm--;
5143: return(0);
5144: }
5148: /*@
5149: MatSetOption - Sets a parameter option for a matrix. Some options
5150: may be specific to certain storage formats. Some options
5151: determine how values will be inserted (or added). Sorted,
5152: row-oriented input will generally assemble the fastest. The default
5153: is row-oriented.
5155: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5157: Input Parameters:
5158: + mat - the matrix
5159: . option - the option, one of those listed below (and possibly others),
5160: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5162: Options Describing Matrix Structure:
5163: + MAT_SPD - symmetric positive definite
5164: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5165: . MAT_HERMITIAN - transpose is the complex conjugation
5166: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5167: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5168: you set to be kept with all future use of the matrix
5169: including after MatAssemblyBegin/End() which could
5170: potentially change the symmetry structure, i.e. you
5171: KNOW the matrix will ALWAYS have the property you set.
5174: Options For Use with MatSetValues():
5175: Insert a logically dense subblock, which can be
5176: . MAT_ROW_ORIENTED - row-oriented (default)
5178: Note these options reflect the data you pass in with MatSetValues(); it has
5179: nothing to do with how the data is stored internally in the matrix
5180: data structure.
5182: When (re)assembling a matrix, we can restrict the input for
5183: efficiency/debugging purposes. These options include:
5184: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5185: . MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5186: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5187: .   MAT_NEW_NONZERO_LOCATION_ERR - generates an error if an insertion would create a new entry in the nonzero structure
5188: .   MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5189: -   MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows; an error is generated if
5190:     any process sets values for rows owned by another process. This avoids all reductions in the MatAssembly routines and thus improves
5191:     performance for very large process counts.
5193: Notes:
5194: Some options are relevant only for particular matrix types and
5195: are thus ignored by others. Other options are not supported by
5196: certain matrix types and will generate an error message if set.
5198: If using a Fortran 77 module to compute a matrix, one may need to
5199: use the column-oriented option (or convert to the row-oriented
5200: format).
5202: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5203: that would generate a new entry in the nonzero structure is instead
5204:    ignored. Thus, if memory has not already been allocated for this particular
5205:    data, then the insertion is ignored. For dense matrices, in which
5206:    the entire array is allocated, no entries are ever ignored.
5207:    Set this option after the first MatAssemblyEnd().
5209: MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5210: that would generate a new entry in the nonzero structure instead produces
5211: an error. (Currently supported for AIJ and BAIJ formats only.)
5213: MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5214: that would generate a new entry that has not been preallocated will
5215: instead produce an error. (Currently supported for AIJ and BAIJ formats
5216: only.) This is a useful flag when debugging matrix memory preallocation.
5218: MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5219: other processors should be dropped, rather than stashed.
5220: This is useful if you know that the "owning" processor is also
5221: always generating the correct matrix entries, so that PETSc need
5222: not transfer duplicate entries generated on another processor.
5224: MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5225: searches during matrix assembly. When this flag is set, the hash table
5226:    is created during the first matrix assembly. This hash table is
5227:    used the next time through, during MatSetValues()/MatSetValuesBlocked(),
5228:    to improve the searching of indices. The MAT_NEW_NONZERO_LOCATIONS flag
5229:    should be used with the MAT_USE_HASH_TABLE flag. This option is currently
5230:    supported by the MATMPIBAIJ format only.
5232:    MAT_KEEP_NONZERO_PATTERN indicates that when MatZeroRows() is called the zeroed entries
5233:    are kept in the nonzero structure.
5235: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5236: a zero location in the matrix
5238: MAT_USE_INODES - indicates using inode version of the code - works with AIJ and
5239: ROWBS matrix types
5241: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5242: zero row routines and thus improves performance for very large process counts.
5244: MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5245: part of the matrix (since they should match the upper triangular part).
5247:    Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
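   Example of Usage (a minimal sketch; A is an assumed matrix whose type and sizes have already been set, and
   error checking is omitted):
.vb
      MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);   // catch preallocation mistakes early
      MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);                    // declare that the matrix is symmetric
.ve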
5249: Level: intermediate
5251: Concepts: matrices^setting options
5253: .seealso: MatOption, Mat
5255: @*/
5256: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5257: {
5263: if (op > 0) {
5266: }
5268:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Option %d is out of range",(int)op);
5269: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");
5271: switch (op) {
5272: case MAT_NO_OFF_PROC_ENTRIES:
5273: mat->nooffprocentries = flg;
5274: return(0);
5275: break;
5276: case MAT_NO_OFF_PROC_ZERO_ROWS:
5277: mat->nooffproczerorows = flg;
5278: return(0);
5279: break;
5280: case MAT_SPD:
5281: mat->spd_set = PETSC_TRUE;
5282: mat->spd = flg;
5283: if (flg) {
5284: mat->symmetric = PETSC_TRUE;
5285: mat->structurally_symmetric = PETSC_TRUE;
5286: mat->symmetric_set = PETSC_TRUE;
5287: mat->structurally_symmetric_set = PETSC_TRUE;
5288: }
5289: break;
5290: case MAT_SYMMETRIC:
5291: mat->symmetric = flg;
5292: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5293: mat->symmetric_set = PETSC_TRUE;
5294: mat->structurally_symmetric_set = flg;
5295: break;
5296: case MAT_HERMITIAN:
5297: mat->hermitian = flg;
5298: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5299: mat->hermitian_set = PETSC_TRUE;
5300: mat->structurally_symmetric_set = flg;
5301: break;
5302: case MAT_STRUCTURALLY_SYMMETRIC:
5303: mat->structurally_symmetric = flg;
5304: mat->structurally_symmetric_set = PETSC_TRUE;
5305: break;
5306: case MAT_SYMMETRY_ETERNAL:
5307: mat->symmetric_eternal = flg;
5308: break;
5309: default:
5310: break;
5311: }
5312: if (mat->ops->setoption) {
5313: (*mat->ops->setoption)(mat,op,flg);
5314: }
5315: return(0);
5316: }
5320: /*@
5321: MatGetOption - Gets a parameter option that has been set for a matrix.
5323: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5325: Input Parameters:
5326: + mat - the matrix
5327: -  option - the option; only certain options are reported here, check the code for which ones
5329: Output Parameter:
5330: .  flg - PETSC_TRUE if the option is turned on, PETSC_FALSE otherwise
5332:    Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
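   Example of Usage (an illustrative sketch; A is an assumed matrix whose type and sizes have already been set):
.vb
      PetscBool issym;
      MatGetOption(A,MAT_SYMMETRIC,&issym);
.ve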
5334: Level: intermediate
5336: Concepts: matrices^setting options
5338: .seealso: MatOption, MatSetOption()
5340: @*/
5341: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5342: {
5347:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Option %d is out of range",(int)op);
5348: if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");
5350: switch (op) {
5351: case MAT_NO_OFF_PROC_ENTRIES:
5352: *flg = mat->nooffprocentries;
5353: break;
5354: case MAT_NO_OFF_PROC_ZERO_ROWS:
5355: *flg = mat->nooffproczerorows;
5356: break;
5357: case MAT_SYMMETRIC:
5358: *flg = mat->symmetric;
5359: break;
5360: case MAT_HERMITIAN:
5361: *flg = mat->hermitian;
5362: break;
5363: case MAT_STRUCTURALLY_SYMMETRIC:
5364: *flg = mat->structurally_symmetric;
5365: break;
5366: case MAT_SYMMETRY_ETERNAL:
5367: *flg = mat->symmetric_eternal;
5368: break;
5369: default:
5370: break;
5371: }
5372: return(0);
5373: }
5377: /*@
5378: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5379: this routine retains the old nonzero structure.
5381: Logically Collective on Mat
5383: Input Parameters:
5384: . mat - the matrix
5386: Level: intermediate
5388:    Notes: If the matrix was not preallocated then a default, likely poor, preallocation will be set in the matrix, so this routine should be called after the preallocation phase.
5389: See the Performance chapter of the users manual for information on preallocating matrices.
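   Example of Usage (a hypothetical sketch of reusing one preallocated matrix across several assemblies; A and
   the assembly code are placeholders):
.vb
      MatZeroEntries(A);                     // reset the values but keep the nonzero structure
      // ... rebuild the entries with MatSetValues(...,ADD_VALUES) ...
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve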
5391: Concepts: matrices^zeroing
5393: .seealso: MatZeroRows()
5394: @*/
5395: PetscErrorCode MatZeroEntries(Mat mat)
5396: {
5402: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5403: if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5404: if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5405: MatCheckPreallocated(mat,1);
5407: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5408: (*mat->ops->zeroentries)(mat);
5409: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5410: PetscObjectStateIncrease((PetscObject)mat);
5411: #if defined(PETSC_HAVE_CUSP)
5412: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5413: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5414: }
5415: #endif
5416: #if defined(PETSC_HAVE_VIENNACL)
5417: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5418: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5419: }
5420: #endif
5421: return(0);
5422: }
5426: /*@C
5427: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5428: of a set of rows and columns of a matrix.
5430: Collective on Mat
5432: Input Parameters:
5433: + mat - the matrix
5434: . numRows - the number of rows to remove
5435: . rows - the global row indices
5436: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5437: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5438: - b - optional vector of right hand side, that will be adjusted by provided solution
5440: Notes:
5441:    This does not change the nonzero structure of the matrix; it merely zeros those entries in the matrix.
5443:    The user can set a value in the diagonal entry (or for the AIJ and
5444:    row formats can optionally remove the main diagonal entry from the
5445:    nonzero structure as well, by passing 0.0 as the diag argument).
5447: For the parallel case, all processes that share the matrix (i.e.,
5448: those in the communicator used for matrix creation) MUST call this
5449: routine, regardless of whether any rows being zeroed are owned by
5450: them.
5452: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5453: list only rows local to itself).
5455: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
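   Example of Usage (an illustrative sketch of imposing boundary conditions symmetrically; A, x, b and the row
   indices are placeholders and error checking is omitted):
.vb
      PetscInt rows[2] = {0,10};               // global indices of the constrained rows/columns
      MatZeroRowsColumns(A,2,rows,1.0,x,b);    // x supplies the prescribed values, b is adjusted accordingly
.ve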
5457: Level: intermediate
5459: Concepts: matrices^zeroing rows
5461: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumnsIS()
5462: @*/
5463: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5464: {
5471: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5472: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5473: if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5474: MatCheckPreallocated(mat,1);
5476: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5477: MatViewFromOptions(mat,NULL,"-mat_view");
5478: PetscObjectStateIncrease((PetscObject)mat);
5479: #if defined(PETSC_HAVE_CUSP)
5480: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5481: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5482: }
5483: #endif
5484: #if defined(PETSC_HAVE_VIENNACL)
5485: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5486: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5487: }
5488: #endif
5489: return(0);
5490: }
5494: /*@C
5495: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5496: of a set of rows and columns of a matrix.
5498: Collective on Mat
5500: Input Parameters:
5501: + mat - the matrix
5502: .  is - index set of rows (and corresponding columns) to zero
5503: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5504: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5505: - b - optional vector of right hand side, that will be adjusted by provided solution
5507: Notes:
5508:    This does not change the nonzero structure of the matrix; it merely zeros those entries in the matrix.
5510:    The user can set a value in the diagonal entry (or for the AIJ and
5511:    row formats can optionally remove the main diagonal entry from the
5512:    nonzero structure as well, by passing 0.0 as the diag argument).
5514: For the parallel case, all processes that share the matrix (i.e.,
5515: those in the communicator used for matrix creation) MUST call this
5516: routine, regardless of whether any rows being zeroed are owned by
5517: them.
5519: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5520: list only rows local to itself).
5522: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5524: Level: intermediate
5526: Concepts: matrices^zeroing rows
5528: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumns()
5529: @*/
5530: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5531: {
5533: PetscInt numRows;
5534: const PetscInt *rows;
5541: ISGetLocalSize(is,&numRows);
5542: ISGetIndices(is,&rows);
5543: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5544: ISRestoreIndices(is,&rows);
5545: return(0);
5546: }
5550: /*@C
5551: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5552: of a set of rows of a matrix.
5554: Collective on Mat
5556: Input Parameters:
5557: + mat - the matrix
5558: . numRows - the number of rows to remove
5559: . rows - the global row indices
5560: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5561: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5562: - b - optional vector of right hand side, that will be adjusted by provided solution
5564: Notes:
5565: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5566: but does not release memory. For the dense and block diagonal
5567: formats this does not alter the nonzero structure.
5569:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5570:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5571:    merely zeroed.
5573:    The user can set a value in the diagonal entry (or for the AIJ and
5574:    row formats can optionally remove the main diagonal entry from the
5575:    nonzero structure as well, by passing 0.0 as the diag argument).
5577: For the parallel case, all processes that share the matrix (i.e.,
5578: those in the communicator used for matrix creation) MUST call this
5579: routine, regardless of whether any rows being zeroed are owned by
5580: them.
5582: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5583: list only rows local to itself).
5585: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5586: owns that are to be zeroed. This saves a global synchronization in the implementation.
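   Example of Usage (a minimal sketch; A is an assumed assembled matrix and the row indices are placeholders):
.vb
      PetscInt rows[3] = {0,5,9};
      MatZeroRows(A,3,rows,1.0,NULL,NULL);     // put 1.0 on the diagonal of each zeroed row
.ve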
5588: Level: intermediate
5590: Concepts: matrices^zeroing rows
5592: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5593: @*/
5594: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5595: {
5602: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5603: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5604: if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5605: MatCheckPreallocated(mat,1);
5607: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5608: MatViewFromOptions(mat,NULL,"-mat_view");
5609: PetscObjectStateIncrease((PetscObject)mat);
5610: #if defined(PETSC_HAVE_CUSP)
5611: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5612: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5613: }
5614: #endif
5615: #if defined(PETSC_HAVE_VIENNACL)
5616: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5617: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5618: }
5619: #endif
5620: return(0);
5621: }
5625: /*@C
5626: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5627: of a set of rows of a matrix.
5629: Collective on Mat
5631: Input Parameters:
5632: + mat - the matrix
5633: . is - index set of rows to remove
5634: . diag - value put in all diagonals of eliminated rows
5635: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5636: - b - optional vector of right hand side, that will be adjusted by provided solution
5638: Notes:
5639: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5640: but does not release memory. For the dense and block diagonal
5641: formats this does not alter the nonzero structure.
5643:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5644:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5645:    merely zeroed.
5647:    The user can set a value in the diagonal entry (or for the AIJ and
5648:    row formats can optionally remove the main diagonal entry from the
5649:    nonzero structure as well, by passing 0.0 as the diag argument).
5651: For the parallel case, all processes that share the matrix (i.e.,
5652: those in the communicator used for matrix creation) MUST call this
5653: routine, regardless of whether any rows being zeroed are owned by
5654: them.
5656: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5657: list only rows local to itself).
5659: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5660: owns that are to be zeroed. This saves a global synchronization in the implementation.
5662: Level: intermediate
5664: Concepts: matrices^zeroing rows
5666: .seealso: MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5667: @*/
5668: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5669: {
5670: PetscInt numRows;
5671: const PetscInt *rows;
5678: ISGetLocalSize(is,&numRows);
5679: ISGetIndices(is,&rows);
5680: MatZeroRows(mat,numRows,rows,diag,x,b);
5681: ISRestoreIndices(is,&rows);
5682: return(0);
5683: }
5687: /*@C
5688: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5689: of a set of rows of a matrix. These rows must be local to the process.
5691: Collective on Mat
5693: Input Parameters:
5694: + mat - the matrix
5695: . numRows - the number of rows to remove
5696: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5697: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5698: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5699: - b - optional vector of right hand side, that will be adjusted by provided solution
5701: Notes:
5702: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5703: but does not release memory. For the dense and block diagonal
5704: formats this does not alter the nonzero structure.
5706:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5707:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5708:    merely zeroed.
5710:    The user can set a value in the diagonal entry (or for the AIJ and
5711:    row formats can optionally remove the main diagonal entry from the
5712:    nonzero structure as well, by passing 0.0 as the diag argument).
5714: For the parallel case, all processes that share the matrix (i.e.,
5715: those in the communicator used for matrix creation) MUST call this
5716: routine, regardless of whether any rows being zeroed are owned by
5717: them.
5719: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5720: list only rows local to itself).
5722: The grid coordinates are across the entire grid, not just the local portion
5724: In Fortran idxm and idxn should be declared as
5725: $ MatStencil idxm(4,m)
5726: and the values inserted using
5727: $ idxm(MatStencil_i,1) = i
5728: $ idxm(MatStencil_j,1) = j
5729: $ idxm(MatStencil_k,1) = k
5730: $ idxm(MatStencil_c,1) = c
5731: etc
5733:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
5734:    obtained by wrapping values from the right edge. For values to the right of the last entry use that index plus one,
5735:    etc., to obtain values by wrapping from the left edge. This does not work for anything but the
5736:    DM_BOUNDARY_PERIODIC boundary type.
5738:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5739:    a single value per point) you can skip filling those indices.
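   Example of Usage (an illustrative 2d sketch; A is assumed to come from a DMDA with a single degree of freedom
   per point, and the grid indices are placeholders):
.vb
      MatStencil row[2];
      row[0].i = 0; row[0].j = 0;              // unused k and c slots may be left unset
      row[1].i = 3; row[1].j = 7;
      MatZeroRowsStencil(A,2,row,1.0,NULL,NULL);
.ve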
5741: Level: intermediate
5743: Concepts: matrices^zeroing rows
5745: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5746: @*/
5747: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5748: {
5749: PetscInt dim = mat->stencil.dim;
5750: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5751: PetscInt *dims = mat->stencil.dims+1;
5752: PetscInt *starts = mat->stencil.starts;
5753: PetscInt *dxm = (PetscInt*) rows;
5754: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5762: PetscMalloc1(numRows, &jdxm);
5763: for (i = 0; i < numRows; ++i) {
5764: /* Skip unused dimensions (they are ordered k, j, i, c) */
5765: for (j = 0; j < 3-sdim; ++j) dxm++;
5766: /* Local index in X dir */
5767: tmp = *dxm++ - starts[0];
5768: /* Loop over remaining dimensions */
5769: for (j = 0; j < dim-1; ++j) {
5770: /* If nonlocal, set index to be negative */
5771: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5772: /* Update local index */
5773: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5774: }
5775: /* Skip component slot if necessary */
5776: if (mat->stencil.noc) dxm++;
5777: /* Local row number */
5778: if (tmp >= 0) {
5779: jdxm[numNewRows++] = tmp;
5780: }
5781: }
5782: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5783: PetscFree(jdxm);
5784: return(0);
5785: }
5789: /*@C
5790: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5791: of a set of rows and columns of a matrix.
5793: Collective on Mat
5795: Input Parameters:
5796: + mat - the matrix
5797: . numRows - the number of rows/columns to remove
5798: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5799: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5800: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5801: - b - optional vector of right hand side, that will be adjusted by provided solution
5803: Notes:
5804: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5805: but does not release memory. For the dense and block diagonal
5806: formats this does not alter the nonzero structure.
5808:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5809:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5810:    merely zeroed.
5812:    The user can set a value in the diagonal entry (or for the AIJ and
5813:    row formats can optionally remove the main diagonal entry from the
5814:    nonzero structure as well, by passing 0.0 as the diag argument).
5816: For the parallel case, all processes that share the matrix (i.e.,
5817: those in the communicator used for matrix creation) MUST call this
5818: routine, regardless of whether any rows being zeroed are owned by
5819: them.
5821: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5822: list only rows local to itself, but the row/column numbers are given in local numbering).
5824: The grid coordinates are across the entire grid, not just the local portion
5826: In Fortran idxm and idxn should be declared as
5827: $ MatStencil idxm(4,m)
5828: and the values inserted using
5829: $ idxm(MatStencil_i,1) = i
5830: $ idxm(MatStencil_j,1) = j
5831: $ idxm(MatStencil_k,1) = k
5832: $ idxm(MatStencil_c,1) = c
5833: etc
5835:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
5836:    obtained by wrapping values from the right edge. For values to the right of the last entry use that index plus one,
5837:    etc., to obtain values by wrapping from the left edge. This does not work for anything but the
5838:    DM_BOUNDARY_PERIODIC boundary type.
5840:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5841:    a single value per point) you can skip filling those indices.
5843: Level: intermediate
5845: Concepts: matrices^zeroing rows
5847: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5848: @*/
5849: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5850: {
5851: PetscInt dim = mat->stencil.dim;
5852: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5853: PetscInt *dims = mat->stencil.dims+1;
5854: PetscInt *starts = mat->stencil.starts;
5855: PetscInt *dxm = (PetscInt*) rows;
5856: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5864: PetscMalloc1(numRows, &jdxm);
5865: for (i = 0; i < numRows; ++i) {
5866: /* Skip unused dimensions (they are ordered k, j, i, c) */
5867: for (j = 0; j < 3-sdim; ++j) dxm++;
5868: /* Local index in X dir */
5869: tmp = *dxm++ - starts[0];
5870: /* Loop over remaining dimensions */
5871: for (j = 0; j < dim-1; ++j) {
5872: /* If nonlocal, set index to be negative */
5873: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5874: /* Update local index */
5875: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5876: }
5877: /* Skip component slot if necessary */
5878: if (mat->stencil.noc) dxm++;
5879: /* Local row number */
5880: if (tmp >= 0) {
5881: jdxm[numNewRows++] = tmp;
5882: }
5883: }
5884: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
5885: PetscFree(jdxm);
5886: return(0);
5887: }
5891: /*@C
5892: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
5893: of a set of rows of a matrix; using local numbering of rows.
5895: Collective on Mat
5897: Input Parameters:
5898: + mat - the matrix
5899: . numRows - the number of rows to remove
5900: .  rows - the local row indices
5901: . diag - value put in all diagonals of eliminated rows
5902: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5903: - b - optional vector of right hand side, that will be adjusted by provided solution
5905: Notes:
5906: Before calling MatZeroRowsLocal(), the user must first set the
5907: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5909: For the AIJ matrix formats this removes the old nonzero structure,
5910: but does not release memory. For the dense and block diagonal
5911: formats this does not alter the nonzero structure.
5913:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5914:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5915:    merely zeroed.
5917:    The user can set a value in the diagonal entry (or for the AIJ and
5918:    row formats can optionally remove the main diagonal entry from the
5919:    nonzero structure as well, by passing 0.0 as the diag argument).
5921: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5922: owns that are to be zeroed. This saves a global synchronization in the implementation.
5924: Level: intermediate
5926: Concepts: matrices^zeroing
5928: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatSetLocalToGlobalMapping()
5929: @*/
5930: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5931: {
5938: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5939: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5940: MatCheckPreallocated(mat,1);
5942: if (mat->ops->zerorowslocal) {
5943: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
5944: } else {
5945: IS is, newis;
5946: const PetscInt *newRows;
5948: if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
5949: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
5950: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
5951: ISGetIndices(newis,&newRows);
5952: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
5953: ISRestoreIndices(newis,&newRows);
5954: ISDestroy(&newis);
5955: ISDestroy(&is);
5956: }
5957: PetscObjectStateIncrease((PetscObject)mat);
5958: #if defined(PETSC_HAVE_CUSP)
5959: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5960: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5961: }
5962: #endif
5963: #if defined(PETSC_HAVE_VIENNACL)
5964: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5965: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5966: }
5967: #endif
5968: return(0);
5969: }
5973: /*@C
5974: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
5975: of a set of rows of a matrix; using local numbering of rows.
5977: Collective on Mat
5979: Input Parameters:
5980: + mat - the matrix
5981: . is - index set of rows to remove
5982: . diag - value put in all diagonals of eliminated rows
5983: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5984: - b - optional vector of right hand side, that will be adjusted by provided solution
5986: Notes:
5987: Before calling MatZeroRowsLocalIS(), the user must first set the
5988: local-to-global mapping by calling MatSetLocalToGlobalMapping().
5990: For the AIJ matrix formats this removes the old nonzero structure,
5991: but does not release memory. For the dense and block diagonal
5992: formats this does not alter the nonzero structure.
5994:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) has been set, the nonzero structure
5995:    of the matrix is not changed (even for AIJ and BAIJ matrices); the values are
5996:    merely zeroed.
5998:    The user can set a value in the diagonal entry (or for the AIJ and
5999:    row formats can optionally remove the main diagonal entry from the
6000:    nonzero structure as well, by passing 0.0 as the diag argument).
6002: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6003: owns that are to be zeroed. This saves a global synchronization in the implementation.
6005: Level: intermediate
6007: Concepts: matrices^zeroing
6009: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatSetLocalToGlobalMapping()
6010: @*/
6011: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6012: {
6014: PetscInt numRows;
6015: const PetscInt *rows;
6021: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6022: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6023: MatCheckPreallocated(mat,1);
6025: ISGetLocalSize(is,&numRows);
6026: ISGetIndices(is,&rows);
6027: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6028: ISRestoreIndices(is,&rows);
6029: return(0);
6030: }
6034: /*@C
6035: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6036: of a set of rows and columns of a matrix; using local numbering of rows.
6038: Collective on Mat
6040: Input Parameters:
6041: + mat - the matrix
6042: . numRows - the number of rows to remove
6043: .  rows - the local row indices
6044: . diag - value put in all diagonals of eliminated rows
6045: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6046: - b - optional vector of right hand side, that will be adjusted by provided solution
6048: Notes:
6049: Before calling MatZeroRowsColumnsLocal(), the user must first set the
6050: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6052:    The user can set a value in the diagonal entry (or for the AIJ and
6053:    row formats can optionally remove the main diagonal entry from the
6054:    nonzero structure as well, by passing 0.0 as the diag argument).
6056: Level: intermediate
6058: Concepts: matrices^zeroing
6060: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatSetLocalToGlobalMapping()
6061: @*/
6062: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6063: {
6065: IS is, newis;
6066: const PetscInt *newRows;
6072: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6073: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6074: MatCheckPreallocated(mat,1);
6076: if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6077: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6078: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6079: ISGetIndices(newis,&newRows);
6080: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6081: ISRestoreIndices(newis,&newRows);
6082: ISDestroy(&newis);
6083: ISDestroy(&is);
6084: PetscObjectStateIncrease((PetscObject)mat);
6085: #if defined(PETSC_HAVE_CUSP)
6086: if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6087: mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6088: }
6089: #endif
6090: #if defined(PETSC_HAVE_VIENNACL)
6091: if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6092: mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6093: }
6094: #endif
6095: return(0);
6096: }
6100: /*@C
6101: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6102: of a set of rows and columns of a matrix; using local numbering of rows.
6104: Collective on Mat
6106: Input Parameters:
6107: + mat - the matrix
6108: . is - index set of rows to remove
6109: . diag - value put in all diagonals of eliminated rows
6110: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6111: - b - optional vector of right hand side, that will be adjusted by provided solution
6113: Notes:
6114: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6115: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6117:    The user can set a value in the diagonal entry (or for the AIJ and
6118:    row formats can optionally remove the main diagonal entry from the
6119:    nonzero structure as well, by passing 0.0 as the diag argument).
6121: Level: intermediate
6123: Concepts: matrices^zeroing
6125: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatSetLocalToGlobalMapping()
6126: @*/
6127: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6128: {
6130: PetscInt numRows;
6131: const PetscInt *rows;
6137: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6138: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6139: MatCheckPreallocated(mat,1);
6141: ISGetLocalSize(is,&numRows);
6142: ISGetIndices(is,&rows);
6143: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6144: ISRestoreIndices(is,&rows);
6145: return(0);
6146: }
6150: /*@
6151: MatGetSize - Returns the numbers of rows and columns in a matrix.
6153: Not Collective
6155: Input Parameter:
6156: . mat - the matrix
6158: Output Parameters:
6159: + m - the number of global rows
6160: - n - the number of global columns
6162: Note: both output parameters can be NULL on input.
6164: Level: beginner
6166: Concepts: matrices^size
6168: .seealso: MatGetLocalSize()
6169: @*/
6170: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6171: {
6174: if (m) *m = mat->rmap->N;
6175: if (n) *n = mat->cmap->N;
6176: return(0);
6177: }
6181: /*@
6182: MatGetLocalSize - Returns the number of rows and columns in a matrix
6183: stored locally. This information may be implementation dependent, so
6184: use with care.
6186: Not Collective
6188: Input Parameters:
6189: . mat - the matrix
6191: Output Parameters:
6192: + m - the number of local rows
6193: - n - the number of local columns
6195: Note: both output parameters can be NULL on input.
6197: Level: beginner
6199: Concepts: matrices^local size
6201: .seealso: MatGetSize()
6202: @*/
6203: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6204: {
6209: if (m) *m = mat->rmap->n;
6210: if (n) *n = mat->cmap->n;
6211: return(0);
6212: }
6216: /*@
6217:    MatGetOwnershipRangeColumn - Returns the range of matrix columns associated with the rows of a vector one multiplies by that are owned by
6218:    this process. (The columns of the "diagonal block")
6220: Not Collective, unless matrix has not been allocated, then collective on Mat
6222: Input Parameters:
6223: . mat - the matrix
6225: Output Parameters:
6226: + m - the global index of the first local column
6227: - n - one more than the global index of the last local column
6229: Notes: both output parameters can be NULL on input.
6231: Level: developer
6233: Concepts: matrices^column ownership
6235: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6237: @*/
6238: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6239: {
6245: MatCheckPreallocated(mat,1);
6246: if (m) *m = mat->cmap->rstart;
6247: if (n) *n = mat->cmap->rend;
6248: return(0);
6249: }
6253: /*@
6254: MatGetOwnershipRange - Returns the range of matrix rows owned by
6255: this processor, assuming that the matrix is laid out with the first
6256: n1 rows on the first processor, the next n2 rows on the second, etc.
6257: For certain parallel layouts this range may not be well defined.
6259: Not Collective
6261: Input Parameters:
6262: . mat - the matrix
6264: Output Parameters:
6265: + m - the global index of the first local row
6266: - n - one more than the global index of the last local row
6268: Note: Both output parameters can be NULL on input.
6269: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6270: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6271: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
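   Example of Usage (a typical sketch for looping over the locally owned rows; A is an assumed preallocated
   matrix and error checking is omitted):
.vb
      PetscInt rstart,rend,i;
      MatGetOwnershipRange(A,&rstart,&rend);
      for (i=rstart; i<rend; i++) {
        // call MatSetValues() for global row i here
      }
.ve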
6273: Level: beginner
6275: Concepts: matrices^row ownership
6277: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6279: @*/
6280: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6281: {
6287: MatCheckPreallocated(mat,1);
6288: if (m) *m = mat->rmap->rstart;
6289: if (n) *n = mat->rmap->rend;
6290: return(0);
6291: }
6295: /*@C
6296: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6297: each process
6299: Not Collective, unless matrix has not been allocated, then collective on Mat
6301: Input Parameters:
6302: . mat - the matrix
6304: Output Parameters:
6305: .  ranges - start of each process's portion, plus one final entry equal to the total number of rows
6307: Level: beginner
6309: Concepts: matrices^row ownership
6311: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6313: @*/
6314: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6315: {
6321: MatCheckPreallocated(mat,1);
6322: PetscLayoutGetRanges(mat->rmap,ranges);
6323: return(0);
6324: }
6328: /*@C
6329:    MatGetOwnershipRangesColumn - Returns the ranges of matrix columns associated with the rows of a vector one multiplies by that are owned by
6330:    each process. (The columns of the "diagonal blocks" for each process)
6332: Not Collective, unless matrix has not been allocated, then collective on Mat
6334: Input Parameters:
6335: . mat - the matrix
6337: Output Parameters:
6338: .  ranges - start of each process's portion, plus one final entry equal to the total number of columns
6340: Level: beginner
6342: Concepts: matrices^column ownership
6344: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6346: @*/
6347: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6348: {
6354: MatCheckPreallocated(mat,1);
6355: PetscLayoutGetRanges(mat->cmap,ranges);
6356: return(0);
6357: }
6361: /*@C
6362: MatGetOwnershipIS - Get row and column ownership as index sets
6364: Not Collective
6366: Input Arguments:
6367: . A - matrix of type Elemental
6369: Output Arguments:
6370: + rows - rows in which this process owns elements
6371: -  cols - columns in which this process owns elements
6373: Level: intermediate
6375: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL, MatSetValues()
6376: @*/
6377: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6378: {
6379: PetscErrorCode ierr,(*f)(Mat,IS*,IS*);
6382: MatCheckPreallocated(A,1);
6383: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6384: if (f) {
6385: (*f)(A,rows,cols);
6386: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6387: if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6388: if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6389: }
6390: return(0);
6391: }
6395: /*@C
6396: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6397: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6398: to complete the factorization.
6400: Collective on Mat
6402: Input Parameters:
6403: + mat - the matrix
6404: . row - row permutation
6405: . column - column permutation
6406: - info - structure containing
6407: $ levels - number of levels of fill.
6408: $ expected fill - as ratio of original fill.
6409: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6410: missing diagonal entries)
6412: Output Parameters:
6413: . fact - new matrix that has been symbolically factored
6415: Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
6417: Most users should employ the simplified KSP interface for linear solvers
6418: instead of working directly with matrix algebra routines such as this.
6419: See, e.g., KSPCreate().
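   Example of Usage (an illustrative sketch of the complete factorization sequence; A is an assumed assembled
   AIJ matrix, and the solver package, ordering and fill parameters shown are only example choices):
.vb
      Mat           F;
      IS            row,col;
      MatFactorInfo info;
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,&F);
      MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
      MatFactorInfoInitialize(&info);
      info.levels = 1;
      info.fill   = 1.0;
      MatILUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);           // F can now be used with MatSolve()
.ve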
6421: Level: developer
6423: Concepts: matrices^symbolic LU factorization
6424: Concepts: matrices^factorization
6425: Concepts: LU^symbolic factorization
6427: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6428: MatGetOrdering(), MatFactorInfo
6430:     Developer Note: the Fortran interface is not autogenerated as the f90
6431:     interface definition cannot be generated correctly [due to MatFactorInfo]
6433: @*/
6434: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6435: {
6445: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6446: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6447: if (!(fact)->ops->ilufactorsymbolic) {
6448: const MatSolverPackage spackage;
6449: MatFactorGetSolverPackage(fact,&spackage);
6450: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6451: }
6452: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6453: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6454: MatCheckPreallocated(mat,2);
6456: PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6457: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6458: PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6459: return(0);
6460: }
6464: /*@C
6465: MatICCFactorSymbolic - Performs symbolic incomplete
6466: Cholesky factorization for a symmetric matrix. Use
6467: MatCholeskyFactorNumeric() to complete the factorization.
6469: Collective on Mat
6471: Input Parameters:
6472: + mat - the matrix
6473: . perm - row and column permutation
6474: - info - structure containing
6475: $ levels - number of levels of fill.
6476: $ expected fill - as ratio of original fill.
6478: Output Parameter:
6479: . fact - the factored matrix
6481: Notes:
6482: Most users should employ the KSP interface for linear solvers
6483: instead of working directly with matrix algebra routines such as this.
6484: See, e.g., KSPCreate().
6486: Level: developer
6488: Concepts: matrices^symbolic incomplete Cholesky factorization
6489: Concepts: matrices^factorization
6490:    Concepts: Cholesky^symbolic factorization
6492: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6494:     Developer Note: the Fortran interface is not autogenerated as the f90
6495:     interface definition cannot be generated correctly [due to MatFactorInfo]
6497: @*/
6498: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6499: {
6508: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6509: if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6510: if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6511: if (!(fact)->ops->iccfactorsymbolic) {
6512: const MatSolverPackage spackage;
6513: MatFactorGetSolverPackage(fact,&spackage);
6514: SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6515: }
6516: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6517: MatCheckPreallocated(mat,2);
6519: PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6520: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6521: PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6522: return(0);
6523: }
6527: /*@C
6528: MatGetSubMatrices - Extracts several submatrices from a matrix. If submat
6529: points to an array of valid matrices, they may be reused to store the new
6530: submatrices.
6532: Collective on Mat
6534: Input Parameters:
6535: + mat - the matrix
6536: .  n   - the number of submatrices to be extracted (on this process; may be zero)
6537: . irow, icol - index sets of rows and columns to extract
6538: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6540: Output Parameter:
6541: . submat - the array of submatrices
6543: Notes:
6544: MatGetSubMatrices() can extract ONLY sequential submatrices
6545: (from both sequential and parallel matrices). Use MatGetSubMatrix()
6546: to extract a parallel submatrix.
6548: Some matrix types place restrictions on the row and column
6549: indices, such as that they be sorted or that they be equal to each other.
6551: The index sets may not have duplicate entries.
6553: When extracting submatrices from a parallel matrix, each processor can
6554: form a different submatrix by setting the rows and columns of its
6555: individual index sets according to the local submatrix desired.
6557: When finished using the submatrices, the user should destroy
6558: them with MatDestroyMatrices().
6560: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6561: original matrix has not changed from that last call to MatGetSubMatrices().
6563: This routine creates the matrices in submat; you should NOT create them before
6564: calling it. It also allocates the array of matrix pointers submat.
6566: For BAIJ matrices the index sets must respect the block structure, that is if they
6567: request one row/column in a block, they must request all rows/columns that are in
6568: that block. For example, if the block size is 2 you cannot request just row 0 and
6569: column 0.
6571: Fortran Note:
6572: The Fortran interface is slightly different from that given below; it
6573: requires one to pass in as submat a Mat (integer) array of size at least m.
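   Example of Usage (an illustrative sketch that extracts one sequential square block per process; A is an
   assumed assembled matrix and nlocal/rstart are placeholder values describing the desired rows):
.vb
      IS  irow[1],icol[1];
      Mat *submat;
      ISCreateStride(PETSC_COMM_SELF,nlocal,rstart,1,&irow[0]);
      ISCreateStride(PETSC_COMM_SELF,nlocal,rstart,1,&icol[0]);
      MatGetSubMatrices(A,1,irow,icol,MAT_INITIAL_MATRIX,&submat);
      // ... use submat[0] ...
      MatDestroyMatrices(1,&submat);
      ISDestroy(&irow[0]);
      ISDestroy(&icol[0]);
.ve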
6575: Level: advanced
6577: Concepts: matrices^accessing submatrices
6578: Concepts: submatrices
6580: .seealso: MatDestroyMatrices(), MatGetSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6581: @*/
6582: PetscErrorCode MatGetSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6583: {
6585: PetscInt i;
6586: PetscBool eq;
6591: if (n) {
6596: }
6598: if (n && scall == MAT_REUSE_MATRIX) {
6601: }
6602: if (!mat->ops->getsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6603: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6604: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6605: MatCheckPreallocated(mat,1);
6607: PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6608: (*mat->ops->getsubmatrices)(mat,n,irow,icol,scall,submat);
6609: PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6610: for (i=0; i<n; i++) {
6611: (*submat)[i]->factortype = MAT_FACTOR_NONE; /* in case in place factorization was previously done on submatrix */
6612: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6613: ISEqual(irow[i],icol[i],&eq);
6614: if (eq) {
6615: if (mat->symmetric) {
6616: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6617: } else if (mat->hermitian) {
6618: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6619: } else if (mat->structurally_symmetric) {
6620: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6621: }
6622: }
6623: }
6624: }
6625: return(0);
6626: }
6630: PetscErrorCode MatGetSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6631: {
6633: PetscInt i;
6634: PetscBool eq;
6639: if (n) {
6644: }
6646: if (n && scall == MAT_REUSE_MATRIX) {
6649: }
6650: if (!mat->ops->getsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6651: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6652: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6653: MatCheckPreallocated(mat,1);
6655: PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6656: (*mat->ops->getsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6657: PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6658: for (i=0; i<n; i++) {
6659: if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6660: ISEqual(irow[i],icol[i],&eq);
6661: if (eq) {
6662: if (mat->symmetric) {
6663: MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6664: } else if (mat->hermitian) {
6665: MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6666: } else if (mat->structurally_symmetric) {
6667: MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6668: }
6669: }
6670: }
6671: }
6672: return(0);
6673: }
6677: /*@C
6678: MatDestroyMatrices - Destroys a set of matrices obtained with MatGetSubMatrices().
6680: Collective on Mat
6682: Input Parameters:
6683: + n - the number of local matrices
6684: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6685: sequence of MatGetSubMatrices())
6687: Level: advanced
6689:     Notes: Frees not only the matrices, but also the array that contains the matrices.
6690:            In Fortran this will not free the array.
6692: .seealso: MatGetSubMatrices()
6693: @*/
6694: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6695: {
6697: PetscInt i;
6700: if (!*mat) return(0);
6701: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6703: for (i=0; i<n; i++) {
6704: MatDestroy(&(*mat)[i]);
6705: }
6706: /* memory is allocated even if n = 0 */
6707: PetscFree(*mat);
6708: *mat = NULL;
6709: return(0);
6710: }
6714: /*@C
6715: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6717: Collective on Mat
6719: Input Parameters:
6720: . mat - the matrix
6722: Output Parameter:
6723: . matstruct - the sequential matrix with the nonzero structure of mat
6725: Level: intermediate
6727: .seealso: MatDestroySeqNonzeroStructure(), MatGetSubMatrices(), MatDestroyMatrices()
6728: @*/
6729: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6730: {
6738: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6739: MatCheckPreallocated(mat,1);
6741: if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6742: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6743: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6744: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6745: return(0);
6746: }
6750: /*@C
6751: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
6753: Collective on Mat
6755: Input Parameters:
6756: .  mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6757:                      sequence of MatGetSeqNonzeroStructure())
6759: Level: advanced
6761:     Notes: Frees the matrix obtained with MatGetSeqNonzeroStructure()
6763: .seealso: MatGetSeqNonzeroStructure()
6764: @*/
6765: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6766: {
6771: MatDestroy(mat);
6772: return(0);
6773: }
6777: /*@
6778: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6779: replaces the index sets by larger ones that represent submatrices with
6780: additional overlap.
6782: Collective on Mat
6784: Input Parameters:
6785: + mat - the matrix
6786: . n - the number of index sets
6787: .  is  - the array of index sets (these index sets will be changed during the call)
6788: - ov - the additional overlap requested
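   Example of Usage (a minimal sketch in the spirit of an additive Schwarz setup; A and the initial index set
   array is[] are assumed to exist already):
.vb
      Mat *submat;
      MatIncreaseOverlap(A,1,is,2);            // grow the local subdomain by 2 levels of overlap
      MatGetSubMatrices(A,1,is,is,MAT_INITIAL_MATRIX,&submat);
.ve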
6790: Level: developer
6792: Concepts: overlap
6793: Concepts: ASM^computing overlap
6795: .seealso: MatGetSubMatrices()
6796: @*/
6797: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6798: {
6804: if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6805: if (n) {
6808: }
6809: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6810: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6811: MatCheckPreallocated(mat,1);
6813: if (!ov) return(0);
6814: if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6815: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6816: (*mat->ops->increaseoverlap)(mat,n,is,ov);
6817: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6818: return(0);
6819: }
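/*
   Example of growing local subdomains, as is done for additive Schwarz methods (a minimal
   sketch: A is assumed to be an assembled parallel matrix). The index set starts as the
   locally owned rows and is enlarged in place by two levels of overlap.
.vb
      IS       is[1];
      PetscInt rstart,rend;

      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&is[0]);
      MatIncreaseOverlap(A,1,is,2);
      ... is[0] now indexes the overlapping subdomain ...
      ISDestroy(&is[0]);
.ve
*/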
6823: /*@
6824: MatGetBlockSize - Returns the matrix block size.
6826: Not Collective
6828: Input Parameter:
6829: . mat - the matrix
6831: Output Parameter:
6832: . bs - block size
6834: Notes:
6835: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6837: If the block size has not been set yet this routine returns 1.
6839: Level: intermediate
6841: Concepts: matrices^block size
6843: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
6844: @*/
6845: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
6846: {
6850: *bs = PetscAbs(mat->rmap->bs);
6851: return(0);
6852: }
6856: /*@
6857: MatGetBlockSizes - Returns the matrix block row and column sizes.
6859: Not Collective
6861: Input Parameter:
6862: . mat - the matrix
6864: Output Parameters:
6865: + rbs - row block size
6866: - cbs - column block size
6868: Notes:
6869: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6870: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
6872: If a block size has not been set yet this routine returns 1.
6874: Level: intermediate
6876: Concepts: matrices^block size
6878: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
6879: @*/
6880: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
6881: {
6886: if (rbs) *rbs = PetscAbs(mat->rmap->bs);
6887: if (cbs) *cbs = PetscAbs(mat->cmap->bs);
6888: return(0);
6889: }
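/*
   Example of querying block sizes (a minimal sketch: A is assumed to be an existing matrix).
   MatGetBlockSize() returns the single (row) block size, MatGetBlockSizes() the possibly
   different row and column block sizes; both return 1 if no block size has been set.
.vb
      PetscInt bs,rbs,cbs;

      MatGetBlockSize(A,&bs);
      MatGetBlockSizes(A,&rbs,&cbs);
.ve
*/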
6893: /*@
6894: MatSetBlockSize - Sets the matrix block size.
6896: Logically Collective on Mat
6898: Input Parameters:
6899: + mat - the matrix
6900: - bs - block size
6902: Notes:
6903: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6905: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
6907: Level: intermediate
6909: Concepts: matrices^block size
6911: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
6912: @*/
6913: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
6914: {
6920: PetscLayoutSetBlockSize(mat->rmap,bs);
6921: PetscLayoutSetBlockSize(mat->cmap,bs);
6922: return(0);
6923: }
6927: /*@
6928: MatSetBlockSizes - Sets the matrix block row and column sizes.
6930: Logically Collective on Mat
6932: Input Parameters:
6933: + mat - the matrix
6934: . rbs - row block size
6935: - cbs - column block size
6937: Notes:
6938: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
6939: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
6941: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later
6943: The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().
6945: Level: intermediate
6947: Concepts: matrices^block size
6949: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
6950: @*/
6951: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
6952: {
6959: PetscLayoutSetBlockSize(mat->rmap,rbs);
6960: PetscLayoutSetBlockSize(mat->cmap,cbs);
6961: return(0);
6962: }
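/*
   Example of setting block sizes before preallocation (a minimal sketch: the local sizes m
   and n and the block sizes 3 and 1 are illustrative). The call must come before MatSetUp()
   or the type-specific preallocation routine.
.vb
      Mat B;

      MatCreate(PETSC_COMM_WORLD,&B);
      MatSetSizes(B,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
      MatSetType(B,MATAIJ);
      MatSetBlockSizes(B,3,1);
      MatSetUp(B);
.ve
*/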
6966: /*@
6967: MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices
6969: Logically Collective on Mat
6971: Input Parameters:
6972: + mat - the matrix
6973: . fromRow - matrix from which to copy row block size
6974: - fromCol - matrix from which to copy column block size (can be same as fromRow)
6976: Level: developer
6978: Concepts: matrices^block size
6980: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
6981: @*/
6982: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
6983: {
6990: if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
6991: if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
6992: return(0);
6993: }
6997: /*@
6998: MatResidual - Default routine to calculate the residual.
7000: Collective on Mat and Vec
7002: Input Parameters:
7003: + mat - the matrix
7004: . b - the right-hand-side
7005: - x - the approximate solution
7007: Output Parameter:
7008: . r - location to store the residual
7010: Level: developer
7012: .keywords: MG, default, multigrid, residual
7014: .seealso: PCMGSetResidual()
7015: @*/
7016: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7017: {
7026: MatCheckPreallocated(mat,1);
7027: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7028: if (!mat->ops->residual) {
7029: MatMult(mat,x,r);
7030: VecAYPX(r,-1.0,b);
7031: } else {
7032: (*mat->ops->residual)(mat,b,x,r);
7033: }
7034: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7035: return(0);
7036: }
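/*
   Example of computing a residual norm (a minimal sketch: A, b and x are assumed to be a
   conforming matrix, right-hand side and approximate solution). MatResidual() places
   b - A*x (or a type-specific equivalent) in r.
.vb
      Vec       r;
      PetscReal rnorm;

      VecDuplicate(b,&r);
      MatResidual(A,b,x,r);
      VecNorm(r,NORM_2,&rnorm);
      VecDestroy(&r);
.ve
*/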
7040: /*@C
7041: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
7043: Collective on Mat
7045: Input Parameters:
7046: + mat - the matrix
7047: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7048: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7049: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7050: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7051: always used.
7053: Output Parameters:
7054: + n - number of rows in the (possibly compressed) matrix
7055: . ia - the row pointers [of length n+1]
7056: . ja - the column indices
7057: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7058: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
7060: Level: developer
7062: Notes: You CANNOT change any of the ia[] or ja[] values.
7064: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values
7066: Fortran Note:
7068: In Fortran use
7069: $ PetscInt ia(1), ja(1)
7070: $ PetscOffset iia, jja
7071: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7072: $
7073: $ or
7074: $
7075: $    PetscInt, pointer :: ia(:), ja(:)
7076: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7079: Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7081: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7082: @*/
7083: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7084: {
7094: MatCheckPreallocated(mat,1);
7095: if (!mat->ops->getrowij) *done = PETSC_FALSE;
7096: else {
7097: *done = PETSC_TRUE;
7098: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7099: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7100: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7101: }
7102: return(0);
7103: }
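/*
   Example of accessing the compressed row structure from C (a minimal sketch: A is assumed
   to be a sequential AIJ matrix so the operation is supported). The column indices of row i
   are ja[ia[i]] through ja[ia[i+1]-1]; the arrays are read-only and must be given back with
   MatRestoreRowIJ().
.vb
      PetscInt       nrows,i;
      const PetscInt *ia,*ja;
      PetscBool      done;

      MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&nrows,&ia,&ja,&done);
      if (done) {
        for (i=0; i<nrows; i++) {
          ... row i has ia[i+1]-ia[i] nonzeros ...
        }
      }
      MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&nrows,&ia,&ja,&done);
.ve
*/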
7107: /*@C
7108: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
7110: Collective on Mat
7112: Input Parameters:
7113: + mat - the matrix
7114: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7115: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7116: symmetrized
7117: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7118: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7119: always used.
7120: . n - number of columns in the (possibly compressed) matrix
7121: . ia - the column pointers
7122: - ja - the row indices
7124: Output Parameters:
7125: . done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned
7127: Note:
7128: This routine zeros out n, ia, and ja. This is to prevent accidental
7129: use of the array after it has been restored. If you pass NULL, it will
7130: not zero the pointers. Use of ia or ja after MatRestoreColumnIJ() is invalid.
7132: Level: developer
7134: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7135: @*/
7136: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7137: {
7147: MatCheckPreallocated(mat,1);
7148: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7149: else {
7150: *done = PETSC_TRUE;
7151: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7152: }
7153: return(0);
7154: }
7158: /*@C
7159: MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7160: MatGetRowIJ().
7162: Collective on Mat
7164: Input Parameters:
7165: + mat - the matrix
7166: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7167: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7168: symmetrized
7169: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7170: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7171: always used.
7172: . n - size of (possibly compressed) matrix
7173: . ia - the row pointers
7174: - ja - the column indices
7176: Output Parameters:
7177: . done - PETSC_TRUE or PETSC_FALSE indicating that the values have been returned
7179: Note:
7180: This routine zeros out n, ia, and ja. This is to prevent accidental
7181: use of the array after it has been restored. If you pass NULL, it will
7182: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
7184: Level: developer
7186: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7187: @*/
7188: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7189: {
7198: MatCheckPreallocated(mat,1);
7200: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7201: else {
7202: *done = PETSC_TRUE;
7203: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7204: if (n) *n = 0;
7205: if (ia) *ia = NULL;
7206: if (ja) *ja = NULL;
7207: }
7208: return(0);
7209: }
7213: /*@C
7214: MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7215: MatGetColumnIJ().
7217: Collective on Mat
7219: Input Parameters:
7220: + mat - the matrix
7221: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7222: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7223: symmetrized
7224: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7225: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7226: always used.
7228: Output Parameters:
7229: + n - size of (possibly compressed) matrix
7230: . ia - the column pointers
7231: . ja - the row indices
7232: - done - PETSC_TRUE or PETSC_FALSE indicating that the values have been returned
7234: Level: developer
7236: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7237: @*/
7238: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7239: {
7248: MatCheckPreallocated(mat,1);
7250: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7251: else {
7252: *done = PETSC_TRUE;
7253: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7254: if (n) *n = 0;
7255: if (ia) *ia = NULL;
7256: if (ja) *ja = NULL;
7257: }
7258: return(0);
7259: }
7263: /*@C
7264: MatColoringPatch - Used inside matrix coloring routines that
7265: use MatGetRowIJ() and/or MatGetColumnIJ().
7267: Collective on Mat
7269: Input Parameters:
7270: + mat - the matrix
7271: . ncolors - max color value
7272: . n - number of entries in colorarray
7273: - colorarray - array indicating color for each column
7275: Output Parameters:
7276: . iscoloring - coloring generated using colorarray information
7278: Level: developer
7280: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7282: @*/
7283: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7284: {
7292: MatCheckPreallocated(mat,1);
7294: if (!mat->ops->coloringpatch) {
7295: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7296: } else {
7297: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7298: }
7299: return(0);
7300: }
7305: /*@
7306: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7308: Logically Collective on Mat
7310: Input Parameter:
7311: . mat - the factored matrix to be reset
7313: Notes:
7314: This routine should be used only with factored matrices formed by in-place
7315: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7316: format). This option can save memory, for example, when solving nonlinear
7317: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7318: ILU(0) preconditioner.
7320: Note that one can specify in-place ILU(0) factorization by calling
7321: .vb
7322: PCSetType(pc,PCILU);
7323: PCFactorSetUseInPlace(pc);
7324: .ve
7325: or by using the options -pc_type ilu -pc_factor_in_place
7327: In-place factorization ILU(0) can also be used as a local
7328: solver for the blocks within the block Jacobi or additive Schwarz
7329: methods (runtime option: -sub_pc_factor_in_place). See Users-Manual: ch_pc
7330: for details on setting local solver options.
7332: Most users should employ the simplified KSP interface for linear solvers
7333: instead of working directly with matrix algebra routines such as this.
7334: See, e.g., KSPCreate().
7336: Level: developer
7338: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()
7340: Concepts: matrices^unfactored
7342: @*/
7343: PetscErrorCode MatSetUnfactored(Mat mat)
7344: {
7350: MatCheckPreallocated(mat,1);
7351: mat->factortype = MAT_FACTOR_NONE;
7352: if (!mat->ops->setunfactored) return(0);
7353: (*mat->ops->setunfactored)(mat);
7354: return(0);
7355: }
7357: /*MC
7358: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7360: Synopsis:
7361: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7363: Not collective
7365: Input Parameter:
7366: . x - matrix
7368: Output Parameters:
7369: + xx_v - the Fortran90 pointer to the array
7370: - ierr - error code
7372: Example of Usage:
7373: .vb
7374: PetscScalar, pointer :: xx_v(:,:)
7375: ....
7376: call MatDenseGetArrayF90(x,xx_v,ierr)
7377: a = xx_v(3,1)
7378: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7379: .ve
7381: Level: advanced
7383: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7385: Concepts: matrices^accessing array
7387: M*/
7389: /*MC
7390: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7391: accessed with MatDenseGetArrayF90().
7393: Synopsis:
7394: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7396: Not collective
7398: Input Parameters:
7399: + x - matrix
7400: - xx_v - the Fortran90 pointer to the array
7402: Output Parameter:
7403: . ierr - error code
7405: Example of Usage:
7406: .vb
7407: PetscScalar, pointer :: xx_v(:,:)
7408: ....
7409: call MatDenseGetArrayF90(x,xx_v,ierr)
7410: a = xx_v(3,1)
7411: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7412: .ve
7414: Level: advanced
7416: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7418: M*/
7421: /*MC
7422: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7424: Synopsis:
7425: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7427: Not collective
7429: Input Parameter:
7430: . x - matrix
7432: Output Parameters:
7433: + xx_v - the Fortran90 pointer to the array
7434: - ierr - error code
7436: Example of Usage:
7437: .vb
7438: PetscScalar, pointer :: xx_v(:)
7439: ....
7440: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7441: a = xx_v(3)
7442: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7443: .ve
7445: Level: advanced
7447: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7449: Concepts: matrices^accessing array
7451: M*/
7453: /*MC
7454: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7455: accessed with MatSeqAIJGetArrayF90().
7457: Synopsis:
7458: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7460: Not collective
7462: Input Parameters:
7463: + x - matrix
7464: - xx_v - the Fortran90 pointer to the array
7466: Output Parameter:
7467: . ierr - error code
7469: Example of Usage:
7470: .vb
7471: PetscScalar, pointer :: xx_v(:)
7472: ....
7473: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7474: a = xx_v(3)
7475: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7476: .ve
7478: Level: advanced
7480: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7482: M*/
7487: /*@
7488: MatGetSubMatrix - Gets a single submatrix on the same number of processors
7489: as the original matrix.
7491: Collective on Mat
7493: Input Parameters:
7494: + mat - the original matrix
7495: . isrow - parallel IS containing the rows this processor should obtain
7496: . iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7497: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7499: Output Parameter:
7500: . newmat - the new submatrix, of the same type as the old
7502: Level: advanced
7504: Notes:
7505: The submatrix will be able to be multiplied with vectors using the same layout as iscol.
7507: Some matrix types place restrictions on the row and column indices, such
7508: as that they be sorted or that they be equal to each other.
7510: The index sets may not have duplicate entries.
7512: The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7513: the MatGetSubMatrix() routine will create the newmat for you. Any additional calls
7514: to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7515: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7516: you are finished using it.
7518: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7519: the input matrix.
7521: If iscol is NULL then all columns are obtained (not supported in Fortran).
7523: Example usage:
7524: Consider the following 8x8 matrix with 34 non-zero values, that is
7525: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7526: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7527: as follows:
7529: .vb
7530: 1 2 0 | 0 3 0 | 0 4
7531: Proc0 0 5 6 | 7 0 0 | 8 0
7532: 9 0 10 | 11 0 0 | 12 0
7533: -------------------------------------
7534: 13 0 14 | 15 16 17 | 0 0
7535: Proc1 0 18 0 | 19 20 21 | 0 0
7536: 0 0 0 | 22 23 0 | 24 0
7537: -------------------------------------
7538: Proc2 25 26 27 | 0 0 28 | 29 0
7539: 30 0 0 | 31 32 33 | 0 34
7540: .ve
7542: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7544: .vb
7545: 2 0 | 0 3 0 | 0
7546: Proc0 5 6 | 7 0 0 | 8
7547: -------------------------------
7548: Proc1 18 0 | 19 20 21 | 0
7549: -------------------------------
7550: Proc2 26 27 | 0 0 28 | 29
7551: 0 0 | 31 32 33 | 0
7552: .ve
7555: Concepts: matrices^submatrices
7557: .seealso: MatGetSubMatrices()
7558: @*/
7559: PetscErrorCode MatGetSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7560: {
7562: PetscMPIInt size;
7563: Mat *local;
7564: IS iscoltmp;
7573: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7574: if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");
7576: MatCheckPreallocated(mat,1);
7577: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7579: if (!iscol || isrow == iscol) {
7580: PetscBool stride;
7581: PetscMPIInt grabentirematrix = 0,grab;
7582: PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7583: if (stride) {
7584: PetscInt first,step,n,rstart,rend;
7585: ISStrideGetInfo(isrow,&first,&step);
7586: if (step == 1) {
7587: MatGetOwnershipRange(mat,&rstart,&rend);
7588: if (rstart == first) {
7589: ISGetLocalSize(isrow,&n);
7590: if (n == rend-rstart) {
7591: grabentirematrix = 1;
7592: }
7593: }
7594: }
7595: }
7596: MPI_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7597: if (grab) {
7598: PetscInfo(mat,"Getting entire matrix as submatrix\n");
7599: if (cll == MAT_INITIAL_MATRIX) {
7600: *newmat = mat;
7601: PetscObjectReference((PetscObject)mat);
7602: }
7603: return(0);
7604: }
7605: }
7607: if (!iscol) {
7608: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7609: } else {
7610: iscoltmp = iscol;
7611: }
7613: /* if original matrix is on just one processor then use submatrix generated */
7614: if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7615: MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7616: if (!iscol) {ISDestroy(&iscoltmp);}
7617: return(0);
7618: } else if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1) {
7619: MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7620: *newmat = *local;
7621: PetscFree(local);
7622: if (!iscol) {ISDestroy(&iscoltmp);}
7623: return(0);
7624: } else if (!mat->ops->getsubmatrix) {
7625: /* Create a new matrix type that implements the operation using the full matrix */
7626: PetscLogEventBegin(MAT_GetSubMatrix,mat,0,0,0);
7627: switch (cll) {
7628: case MAT_INITIAL_MATRIX:
7629: MatCreateSubMatrix(mat,isrow,iscoltmp,newmat);
7630: break;
7631: case MAT_REUSE_MATRIX:
7632: MatSubMatrixUpdate(*newmat,mat,isrow,iscoltmp);
7633: break;
7634: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7635: }
7636: PetscLogEventEnd(MAT_GetSubMatrix,mat,0,0,0);
7637: if (!iscol) {ISDestroy(&iscoltmp);}
7638: return(0);
7639: }
7641: if (!mat->ops->getsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7642: PetscLogEventBegin(MAT_GetSubMatrix,mat,0,0,0);
7643: (*mat->ops->getsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7644: PetscLogEventEnd(MAT_GetSubMatrix,mat,0,0,0);
7645: if (!iscol) {ISDestroy(&iscoltmp);}
7646: if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7647: return(0);
7648: }
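/*
   Example of extracting the locally owned rows and all columns as a submatrix (a minimal
   sketch: A is assumed to be an assembled parallel matrix; passing NULL for the column index
   set keeps every column).
.vb
      PetscInt rstart,rend;
      IS       isrow;
      Mat      S;

      MatGetOwnershipRange(A,&rstart,&rend);
      ISCreateStride(PetscObjectComm((PetscObject)A),rend-rstart,rstart,1,&isrow);
      MatGetSubMatrix(A,isrow,NULL,MAT_INITIAL_MATRIX,&S);
      ... use S ...
      MatDestroy(&S);
      ISDestroy(&isrow);
.ve
*/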
7652: /*@
7653: MatStashSetInitialSize - sets the sizes of the matrix stash, which is
7654: used during the assembly process to store values that belong to
7655: other processors.
7657: Not Collective
7659: Input Parameters:
7660: + mat - the matrix
7661: . size - the initial size of the stash.
7662: - bsize - the initial size of the block-stash(if used).
7664: Options Database Keys:
7665: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
7666: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
7668: Level: intermediate
7670: Notes:
7671: The block-stash is used for values set with MatSetValuesBlocked() while
7672: the stash is used for values set with MatSetValues()
7674: Run with the option -info and look for output of the form
7675: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7676: to determine the appropriate value, MM, to use for size, and
7677: MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7678: to determine the value, BMM, to use for bsize.
7680: Concepts: stash^setting matrix size
7681: Concepts: matrices^stash
7683: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
7685: @*/
7686: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7687: {
7693: MatStashSetInitialSize_Private(&mat->stash,size);
7694: MatStashSetInitialSize_Private(&mat->bstash,bsize);
7695: return(0);
7696: }
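/*
   Example of presizing the stashes to avoid repeated mallocs during assembly (a minimal
   sketch: the sizes 10000 and 1000 are illustrative and would normally be chosen from the
   -info output described in the manual page above).
.vb
      MatStashSetInitialSize(A,10000,1000);
      ... MatSetValues() / MatSetValuesBlocked() calls ...
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve
*/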
7700: /*@
7701: MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
7702: the matrix
7704: Neighbor-wise Collective on Mat
7706: Input Parameters:
7707: + A - the matrix
7708: . x,y - the vectors
7709: - w - where the result is stored
7711: Level: intermediate
7713: Notes:
7714: w may be the same vector as y.
7716: This allows one to use either the restriction or interpolation (its transpose)
7717: matrix to do the interpolation
7719: Concepts: interpolation
7721: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7723: @*/
7724: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7725: {
7727: PetscInt M,N,Ny;
7735: MatCheckPreallocated(A,1);
7736: MatGetSize(A,&M,&N);
7737: VecGetSize(y,&Ny);
7738: if (M == Ny) {
7739: MatMultAdd(A,x,y,w);
7740: } else {
7741: MatMultTransposeAdd(A,x,y,w);
7742: }
7743: return(0);
7744: }
7748: /*@
7749: MatInterpolate - y = A*x or A'*x depending on the shape of
7750: the matrix
7752: Neighbor-wise Collective on Mat
7754: Input Parameters:
7755: + A - the matrix
7756: - x,y - the vectors
7758: Level: intermediate
7760: Notes:
7761: This allows one to use either the restriction or interpolation (its transpose)
7762: matrix to do the interpolation
7764: Concepts: matrices^interpolation
7766: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7768: @*/
7769: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
7770: {
7772: PetscInt M,N,Ny;
7779: MatCheckPreallocated(A,1);
7780: MatGetSize(A,&M,&N);
7781: VecGetSize(y,&Ny);
7782: if (M == Ny) {
7783: MatMult(A,x,y);
7784: } else {
7785: MatMultTranspose(A,x,y);
7786: }
7787: return(0);
7788: }
7792: /*@
7793: MatRestrict - y = A*x or A'*x
7795: Neighbor-wise Collective on Mat
7797: Input Parameters:
7798: + A - the matrix
7799: - x,y - the vectors
7801: Level: intermediate
7803: Notes:
7804: This allows one to use either the restriction or interpolation (its transpose)
7805: matrix to do the restriction
7807: Concepts: matrices^restriction
7809: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
7811: @*/
7812: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
7813: {
7815: PetscInt M,N,Ny;
7822: MatCheckPreallocated(A,1);
7824: MatGetSize(A,&M,&N);
7825: VecGetSize(y,&Ny);
7826: if (M == Ny) {
7827: MatMult(A,x,y);
7828: } else {
7829: MatMultTranspose(A,x,y);
7830: }
7831: return(0);
7832: }
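/*
   Example of moving vectors between grid levels with a single rectangular matrix (a minimal
   sketch: R is assumed to map fine-grid vectors to coarse-grid vectors, and xf, xc are
   conforming fine and coarse vectors). Each routine applies R or R^T, whichever dimensions
   match, so the same matrix serves both directions.
.vb
      MatRestrict(R,xf,xc);
      MatInterpolate(R,xc,xf);
.ve
*/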
7836: /*@
7837: MatGetNullSpace - retrieves the null space attached to a matrix.
7839: Logically Collective on Mat and MatNullSpace
7841: Input Parameters:
7842: + mat - the matrix
7843: - nullsp - the null space object
7845: Level: developer
7847: Concepts: null space^attaching to matrix
7849: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
7850: @*/
7851: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
7852: {
7857: *nullsp = mat->nullsp;
7858: return(0);
7859: }
7863: /*@
7864: MatSetNullSpace - attaches a null space to a matrix.
7866: Logically Collective on Mat and MatNullSpace
7868: Input Parameters:
7869: + mat - the matrix
7870: - nullsp - the null space object
7872: Level: advanced
7874: Notes:
7875: This null space is used by the linear solvers. Overwrites any previous null space that may have been attached
7877: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
7878: call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.
7881: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
7882: for a matrix A from R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
7883: Similarly R^m = direct sum n(A^T) + R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or correspondingly b is orthogonal to
7884: n(A^T)) and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
7885: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized to the n(A^T).
7887: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
7889: Concepts: null space^attaching to matrix
7891: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
7892: @*/
7893: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
7894: {
7901: MatCheckPreallocated(mat,1);
7902: PetscObjectReference((PetscObject)nullsp);
7903: MatNullSpaceDestroy(&mat->nullsp);
7904: mat->nullsp = nullsp;
7905: return(0);
7906: }
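/*
   Example of attaching the constant null space (a minimal sketch: A is assumed to be a
   matrix, such as a pure Neumann Laplacian, whose null space is spanned by the constant
   vector). The matrix keeps its own reference, so the null space may be destroyed
   immediately afterwards.
.vb
      MatNullSpace nullsp;

      MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,0,NULL,&nullsp);
      MatSetNullSpace(A,nullsp);
      MatNullSpaceDestroy(&nullsp);
.ve
*/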
7910: /*@
7911: MatGetTransposeNullSpace - retrieves the null space attached to the transpose of a matrix.
7913: Logically Collective on Mat and MatNullSpace
7915: Input Parameters:
7916: + mat - the matrix
7917: - nullsp - the null space object
7919: Level: developer
7921: Notes:
7922: This null space is used by the linear solvers; it is attached with MatSetTransposeNullSpace()
7924: Concepts: null space^attaching to matrix
7926: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace()
7927: @*/
7928: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
7929: {
7934: *nullsp = mat->transnullsp;
7935: return(0);
7936: }
7940: /*@
7941: MatSetTransposeNullSpace - attaches a null space to a matrix.
7943: Logically Collective on Mat and MatNullSpace
7945: Input Parameters:
7946: + mat - the matrix
7947: - nullsp - the null space object
7949: Level: advanced
7951: Notes:
7952: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
7953: You must also call MatSetNullSpace()
7956: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
7957: for a matrix A from R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
7958: Similarly R^m = direct sum n(A^T) + R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or correspondingly b is orthogonal to
7959: n(A^T)) and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
7960: the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized to the n(A^T).
7962: Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().
7964: Concepts: null space^attaching to matrix
7966: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetNullSpace(), MatNullSpaceRemove()
7967: @*/
7968: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
7969: {
7976: MatCheckPreallocated(mat,1);
7977: PetscObjectReference((PetscObject)nullsp);
7978: MatNullSpaceDestroy(&mat->transnullsp);
7979: mat->transnullsp = nullsp;
7980: return(0);
7981: }
7985: /*@
7986: MatSetNearNullSpace - attaches a null space to a matrix.
7987: This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
7989: Logically Collective on Mat and MatNullSpace
7991: Input Parameters:
7992: + mat - the matrix
7993: - nullsp - the null space object
7995: Level: advanced
7997: Notes:
7998: Overwrites any previous near null space that may have been attached
8000: Concepts: null space^attaching to matrix
8002: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace()
8003: @*/
8004: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8005: {
8012: MatCheckPreallocated(mat,1);
8013: PetscObjectReference((PetscObject)nullsp);
8014: MatNullSpaceDestroy(&mat->nearnullsp);
8016: mat->nearnullsp = nullsp;
8017: return(0);
8018: }
8022: /*@
8023: MatGetNearNullSpace - Gets the near null space attached with MatSetNearNullSpace()
8025: Not Collective
8027: Input Parameters:
8028: . mat - the matrix
8030: Output Parameters:
8031: . nullsp - the null space object, NULL if not set
8033: Level: developer
8035: Concepts: null space^attaching to matrix
8037: .seealso: MatSetNearNullSpace(), MatGetNullSpace()
8038: @*/
8039: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8040: {
8045: MatCheckPreallocated(mat,1);
8046: *nullsp = mat->nearnullsp;
8047: return(0);
8048: }
8052: /*@C
8053: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
8055: Collective on Mat
8057: Input Parameters:
8058: + mat - the matrix
8059: . row - row/column permutation
8060: . fill - expected fill factor >= 1.0
8061: - level - level of fill, for ICC(k)
8063: Notes:
8064: Probably really in-place only when level of fill is zero, otherwise allocates
8065: new space to store factored matrix and deletes previous memory.
8067: Most users should employ the simplified KSP interface for linear solvers
8068: instead of working directly with matrix algebra routines such as this.
8069: See, e.g., KSPCreate().
8071: Level: developer
8073: Concepts: matrices^incomplete Cholesky factorization
8074: Concepts: Cholesky factorization
8076: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
8078: Developer Note: fortran interface is not autogenerated as the f90
8079: interface definition cannot be generated correctly [due to MatFactorInfo]
8081: @*/
8082: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8083: {
8091: if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8092: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8093: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8094: if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8095: MatCheckPreallocated(mat,1);
8096: (*mat->ops->iccfactor)(mat,row,info);
8097: PetscObjectStateIncrease((PetscObject)mat);
8098: return(0);
8099: }
8103: /*@
8104: MatSetValuesAdifor - Sets values computed with automatic differentiation into a matrix.
8106: Not Collective
8108: Input Parameters:
8109: + mat - the matrix
8110: . nl - leading dimension of v
8111: - v - the values computed with ADIFOR
8113: Level: developer
8115: Notes:
8116: Must call MatSetColoring() before using this routine. Also this matrix must already
8117: have its nonzero pattern determined.
8119: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
8120: MatSetValues(), MatSetColoring()
8121: @*/
8122: PetscErrorCode MatSetValuesAdifor(Mat mat,PetscInt nl,void *v)
8123: {
8131: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8132: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
8133: if (!mat->ops->setvaluesadifor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8134: (*mat->ops->setvaluesadifor)(mat,nl,v);
8135: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
8136: PetscObjectStateIncrease((PetscObject)mat);
8137: return(0);
8138: }
8142: /*@
8143: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8144: ghosted ones.
8146: Not Collective
8148: Input Parameters:
8149: + mat - the matrix
8150: - diag - the diagonal values, including ghost ones
8152: Level: developer
8154: Notes: Works only for MPIAIJ and MPIBAIJ matrices
8156: .seealso: MatDiagonalScale()
8157: @*/
8158: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8159: {
8161: PetscMPIInt size;
8168: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8169: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8170: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8171: if (size == 1) {
8172: PetscInt n,m;
8173: VecGetSize(diag,&n);
8174: MatGetSize(mat,0,&m);
8175: if (m == n) {
8176: MatDiagonalScale(mat,0,diag);
8177: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8178: } else {
8179: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8180: }
8181: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8182: PetscObjectStateIncrease((PetscObject)mat);
8183: return(0);
8184: }
8188: /*@
8189: MatGetInertia - Gets the inertia from a factored matrix
8191: Collective on Mat
8193: Input Parameter:
8194: . mat - the matrix
8196: Output Parameters:
8197: + nneg - number of negative eigenvalues
8198: . nzero - number of zero eigenvalues
8199: - npos - number of positive eigenvalues
8201: Level: advanced
8203: Notes: Matrix must have been factored by MatCholeskyFactor()
8206: @*/
8207: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8208: {
8214: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8215: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8216: if (!mat->ops->getinertia) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8217: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8218: return(0);
8219: }
8221: /* ----------------------------------------------------------------*/
8224: /*@C
8225: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
8227: Neighbor-wise Collective on Mat and Vecs
8229: Input Parameters:
8230: + mat - the factored matrix
8231: - b - the right-hand-side vectors
8233: Output Parameter:
8234: . x - the result vectors
8236: Notes:
8237: The vectors b and x cannot be the same. I.e., one cannot
8238: call MatSolves(A,x,x).
8241: Most users should employ the simplified KSP interface for linear solvers
8242: instead of working directly with matrix algebra routines such as this.
8243: See, e.g., KSPCreate().
8245: Level: developer
8247: Concepts: matrices^triangular solves
8249: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8250: @*/
8251: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8252: {
8258: if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8259: if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8260: if (!mat->rmap->N && !mat->cmap->N) return(0);
8262: if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8263: MatCheckPreallocated(mat,1);
8264: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8265: (*mat->ops->solves)(mat,b,x);
8266: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8267: return(0);
8268: }
8272: /*@
8273: MatIsSymmetric - Test whether a matrix is symmetric
8275: Collective on Mat
8277: Input Parameters:
8278: + A - the matrix to test
8279: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)
8281: Output Parameter:
8282: . flg - the result
8284: Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
8286: Level: intermediate
8288: Concepts: matrix^symmetry
8290: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8291: @*/
8292: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
8293: {
8300: if (!A->symmetric_set) {
8301: if (!A->ops->issymmetric) {
8302: MatType mattype;
8303: MatGetType(A,&mattype);
8304: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8305: }
8306: (*A->ops->issymmetric)(A,tol,flg);
8307: if (!tol) {
8308: A->symmetric_set = PETSC_TRUE;
8309: A->symmetric = *flg;
8310: if (A->symmetric) {
8311: A->structurally_symmetric_set = PETSC_TRUE;
8312: A->structurally_symmetric = PETSC_TRUE;
8313: }
8314: }
8315: } else if (A->symmetric) {
8316: *flg = PETSC_TRUE;
8317: } else if (!tol) {
8318: *flg = PETSC_FALSE;
8319: } else {
8320: if (!A->ops->issymmetric) {
8321: MatType mattype;
8322: MatGetType(A,&mattype);
8323: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8324: }
8325: (*A->ops->issymmetric)(A,tol,flg);
8326: }
8327: return(0);
8328: }
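/*
   Example of testing symmetry and recording the result (a minimal sketch: A is assumed to
   be an assembled matrix and the tolerance is illustrative). Setting the MAT_SYMMETRIC
   option afterwards lets later operations reuse the answer.
.vb
      PetscBool symm;

      MatIsSymmetric(A,1.e-10,&symm);
      if (symm) {
        MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
      }
.ve
*/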
8332: /*@
8333: MatIsHermitian - Test whether a matrix is Hermitian
8335: Collective on Mat
8337: Input Parameters:
8338: + A - the matrix to test
8339: - tol - difference between value and its conjugate transpose less than this amount counts as equal (use 0.0 for exact Hermitian)
8341: Output Parameter:
8342: . flg - the result
8344: Level: intermediate
8346: Concepts: matrix^symmetry
8348: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8349: MatIsSymmetricKnown(), MatIsSymmetric()
8350: @*/
8351: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8352: {
8359: if (!A->hermitian_set) {
8360: if (!A->ops->ishermitian) {
8361: MatType mattype;
8362: MatGetType(A,&mattype);
8363: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8364: }
8365: (*A->ops->ishermitian)(A,tol,flg);
8366: if (!tol) {
8367: A->hermitian_set = PETSC_TRUE;
8368: A->hermitian = *flg;
8369: if (A->hermitian) {
8370: A->structurally_symmetric_set = PETSC_TRUE;
8371: A->structurally_symmetric = PETSC_TRUE;
8372: }
8373: }
8374: } else if (A->hermitian) {
8375: *flg = PETSC_TRUE;
8376: } else if (!tol) {
8377: *flg = PETSC_FALSE;
8378: } else {
8379: if (!A->ops->ishermitian) {
8380: MatType mattype;
8381: MatGetType(A,&mattype);
8382: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8383: }
8384: (*A->ops->ishermitian)(A,tol,flg);
8385: }
8386: return(0);
8387: }
8391: /*@
8392: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8394: Not Collective
8396: Input Parameter:
8397: . A - the matrix to check
8399: Output Parameters:
8400: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8401: - flg - the result
8403: Level: advanced
8405: Concepts: matrix^symmetry
8407: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8408: if you want it explicitly checked
8410: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8411: @*/
8412: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8413: {
8418: if (A->symmetric_set) {
8419: *set = PETSC_TRUE;
8420: *flg = A->symmetric;
8421: } else {
8422: *set = PETSC_FALSE;
8423: }
8424: return(0);
8425: }
8429: /*@
8430: MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.
8432: Not Collective
8434: Input Parameter:
8435: . A - the matrix to check
8437: Output Parameters:
8438: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8439: - flg - the result
8441: Level: advanced
8443: Concepts: matrix^symmetry
8445: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8446: if you want it explicitly checked
8448: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8449: @*/
8450: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8451: {
8456: if (A->hermitian_set) {
8457: *set = PETSC_TRUE;
8458: *flg = A->hermitian;
8459: } else {
8460: *set = PETSC_FALSE;
8461: }
8462: return(0);
8463: }
8467: /*@
8468: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8470: Collective on Mat
8472: Input Parameter:
8473: . A - the matrix to test
8475: Output Parameters:
8476: . flg - the result
8478: Level: intermediate
8480: Concepts: matrix^symmetry
8482: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8483: @*/
8484: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8485: {
8491: if (!A->structurally_symmetric_set) {
8492: if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetric");
8493: (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);
8495: A->structurally_symmetric_set = PETSC_TRUE;
8496: }
8497: *flg = A->structurally_symmetric;
8498: return(0);
8499: }
8503: extern PetscErrorCode MatStashGetInfo_Private(MatStash*,PetscInt*,PetscInt*);
8504: /*@
8505: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8506: to be communicated to other processors during the MatAssemblyBegin/End() process
8508: Not collective
8510: Input Parameter:
8511: . mat - the matrix
8513: Output Parameters:
8514: + nstash - the size of the stash
8515: . reallocs - the number of additional mallocs incurred.
8516: . bnstash - the size of the block stash
8517: - breallocs - the number of additional mallocs incurred in the block stash
8519: Level: advanced
8521: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8523: @*/
8524: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8525: {
8529: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8530: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8531: return(0);
8532: }
8536: /*@C
8537: MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8538: parallel layout
8540: Collective on Mat
8542: Input Parameter:
8543: . mat - the matrix
8545: Output Parameters:
8546: + right - (optional) vector that the matrix can be multiplied against
8547: - left - (optional) vector that the matrix vector product can be stored in
8549: Notes:
8550: The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().
8552: These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed
8554: Level: advanced
8556: .seealso: MatCreate(), VecDestroy()
8557: @*/
8558: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8559: {
8565: MatCheckPreallocated(mat,1);
8566: if (mat->ops->getvecs) {
8567: (*mat->ops->getvecs)(mat,right,left);
8568: } else {
8569: PetscMPIInt size;
8570: PetscInt rbs,cbs;
8571: MPI_Comm_size(PetscObjectComm((PetscObject)mat), &size);
8572: MatGetBlockSizes(mat,&rbs,&cbs);
8573: if (right) {
8574: VecCreate(PetscObjectComm((PetscObject)mat),right);
8575: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8576: VecSetBlockSize(*right,cbs);
8577: VecSetType(*right,VECSTANDARD);
8578: PetscLayoutReference(mat->cmap,&(*right)->map);
8579: }
8580: if (left) {
8581: VecCreate(PetscObjectComm((PetscObject)mat),left);
8582: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8583: VecSetBlockSize(*left,rbs);
8584: VecSetType(*left,VECSTANDARD);
8585: PetscLayoutReference(mat->rmap,&(*left)->map);
8586: }
8587: }
8588: return(0);
8589: }
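/*
   Example of creating work vectors with the layout of the matrix (a minimal sketch: A is
   assumed to be an existing matrix). The right vector x matches the columns of A and the
   left vector y matches its rows, so y = A*x is well defined.
.vb
      Vec x,y;

      MatCreateVecs(A,&x,&y);
      MatMult(A,x,y);
      VecDestroy(&x);
      VecDestroy(&y);
.ve
*/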
8593: /*@C
8594: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8595: with default values.
8597: Not Collective
8599: Input Parameters:
8600: . info - the MatFactorInfo data structure
8603: Notes: The solvers are generally used through the KSP and PC objects, for example
8604: PCLU, PCILU, PCCHOLESKY, PCICC
8606: Level: developer
8608: .seealso: MatFactorInfo
8610: Developer Note: fortran interface is not autogenerated as the f90
8611: interface definition cannot be generated correctly [due to MatFactorInfo]
8613: @*/
8615: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8616: {
8620: PetscMemzero(info,sizeof(MatFactorInfo));
8621: return(0);
8622: }
8626: /*@
8627: MatPtAP - Creates the matrix product C = P^T * A * P
8629: Neighbor-wise Collective on Mat
8631: Input Parameters:
8632: + A - the matrix
8633: . P - the projection matrix
8634: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8635: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P))
8637: Output Parameters:
8638: . C - the product matrix
8640: Notes:
8641: C will be created and must be destroyed by the user with MatDestroy().
8643: This routine is currently only implemented for pairs of AIJ matrices and classes
8644: which inherit from AIJ.
8646: Level: intermediate
8648: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
8649: @*/
8650: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
8651: {
8653: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
8654: PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
8655: PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
8656: PetscBool viatranspose=PETSC_FALSE,viamatmatmatmult=PETSC_FALSE;
8659: PetscOptionsGetBool(((PetscObject)A)->prefix,"-matptap_viatranspose",&viatranspose,NULL);
8660: PetscOptionsGetBool(((PetscObject)A)->prefix,"-matptap_viamatmatmatmult",&viamatmatmatmult,NULL);
8664: MatCheckPreallocated(A,1);
8665: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8666: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8669: MatCheckPreallocated(P,2);
8670: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8671: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8673: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8674: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8676: if (scall == MAT_REUSE_MATRIX) {
8679: if (viatranspose || viamatmatmatmult) {
8680: Mat Pt;
8681: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
8682: if (viamatmatmatmult) {
8683: MatMatMatMult(Pt,A,P,scall,fill,C);
8684: } else {
8685: Mat AP;
8686: MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
8687: MatMatMult(Pt,AP,scall,fill,C);
8688: MatDestroy(&AP);
8689: }
8690: MatDestroy(&Pt);
8691: } else {
8692: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
8693: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
8694: (*(*C)->ops->ptapnumeric)(A,P,*C);
8695: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
8696: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
8697: }
8698: return(0);
8699: }
8701: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
8702: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8704: fA = A->ops->ptap;
8705: fP = P->ops->ptap;
8706: if (fP == fA) {
8707: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
8708: ptap = fA;
8709: } else {
8710: /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
8711: char ptapname[256];
8712: PetscStrcpy(ptapname,"MatPtAP_");
8713: PetscStrcat(ptapname,((PetscObject)A)->type_name);
8714: PetscStrcat(ptapname,"_");
8715: PetscStrcat(ptapname,((PetscObject)P)->type_name);
8716: PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
8717: PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
8718: if (!ptap) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
8719: }
8721: if (viatranspose || viamatmatmatmult) {
8722: Mat Pt;
8723: MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
8724: if (viamatmatmatmult) {
8725: MatMatMatMult(Pt,A,P,scall,fill,C);
8726: PetscInfo(*C,"MatPtAP via MatMatMatMult\n");
8727: } else {
8728: Mat AP;
8729: MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
8730: MatMatMult(Pt,AP,scall,fill,C);
8731: MatDestroy(&AP);
8732: PetscInfo(*C,"MatPtAP via MatTranspose and MatMatMult\n");
8733: }
8734: MatDestroy(&Pt);
8735: } else {
8736: PetscLogEventBegin(MAT_PtAP,A,P,0,0);
8737: (*ptap)(A,P,scall,fill,C);
8738: PetscLogEventEnd(MAT_PtAP,A,P,0,0);
8739: }
8740: return(0);
8741: }
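/*
   Example of forming a Galerkin coarse operator (a minimal sketch: A and P are assumed to
   be assembled AIJ matrices of compatible dimensions). The second call reuses the nonzero
   pattern of C after the values of A have changed.
.vb
      Mat C;

      MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);
      ... values of A change, nonzero pattern stays the same ...
      MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);
      MatDestroy(&C);
.ve
*/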
8745: /*@
8746: MatPtAPNumeric - Computes the matrix product C = P^T * A * P
8748: Neighbor-wise Collective on Mat
8750: Input Parameters:
8751: + A - the matrix
8752: - P - the projection matrix
8754: Output Parameters:
8755: . C - the product matrix
8757: Notes:
8758: C must have been created by calling MatPtAPSymbolic() and must be destroyed by
8759: the user using MatDestroy().
8761: This routine is currently only implemented for pairs of AIJ matrices and classes
8762: which inherit from AIJ. C will be of type MATAIJ.
8764: Level: intermediate
8766: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
8767: @*/
8768: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
8769: {
8775: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8776: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8779: MatCheckPreallocated(P,2);
8780: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8781: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8784: MatCheckPreallocated(C,3);
8785: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8786: if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
8787: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8788: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8789: if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
8790: MatCheckPreallocated(A,1);
8792: PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
8793: (*C->ops->ptapnumeric)(A,P,C);
8794: PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
8795: return(0);
8796: }
8800: /*@
8801: MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P
8803: Neighbor-wise Collective on Mat
8805: Input Parameters:
8806: + A - the matrix
8807: - P - the projection matrix
8809: Output Parameters:
8810: . C - the (i,j) structure of the product matrix
8812: Notes:
8813: C will be created and must be destroyed by the user with MatDestroy().
8815: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
8816: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
8817: this (i,j) structure by calling MatPtAPNumeric().
8819: Level: intermediate
8821: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
8822: @*/
8823: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
8824: {
8830: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8831: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8832:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8835: MatCheckPreallocated(P,2);
8836: if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8837: if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8840: if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
8841: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8842: MatCheckPreallocated(A,1);
8843: PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
8844: (*A->ops->ptapsymbolic)(A,P,fill,C);
8845: PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
8847: /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
8848: return(0);
8849: }
8853: /*@
8854: MatRARt - Creates the matrix product C = R * A * R^T
8856: Neighbor-wise Collective on Mat
8858: Input Parameters:
8859: + A - the matrix
8860: . R - the projection matrix
8861: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
8862: - fill - expected fill as ratio of nnz(C)/nnz(A)
8864: Output Parameters:
8865: . C - the product matrix
8867: Notes:
8868: C will be created and must be destroyed by the user with MatDestroy().
8870: This routine is currently only implemented for pairs of AIJ matrices and classes
8871: which inherit from AIJ.
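   Example of Usage (a minimal sketch; A, R, and C are illustrative names):
.vb
   Mat A,R,C;
   /* ... create and assemble A (n x n) and R (m x n) ... */
   MatRARt(A,R,MAT_INITIAL_MATRIX,2.0,&C);   /* C = R*A*R^T */
   /* ... change numerical values of A, keeping its nonzero pattern ... */
   MatRARt(A,R,MAT_REUSE_MATRIX,2.0,&C);
   MatDestroy(&C);
.ve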
8873: Level: intermediate
8875: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
8876: @*/
8877: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
8878: {
8884: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8885: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8888: MatCheckPreallocated(R,2);
8889: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8890: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8892: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8893: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8894: MatCheckPreallocated(A,1);
8896: if (!A->ops->rart) {
8897: MatType mattype;
8898: MatGetType(A,&mattype);
8899: SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix of type <%s> does not support RARt",mattype);
8900: }
8901: PetscLogEventBegin(MAT_RARt,A,R,0,0);
8902: (*A->ops->rart)(A,R,scall,fill,C);
8903: PetscLogEventEnd(MAT_RARt,A,R,0,0);
8904: return(0);
8905: }
8909: /*@
8910: MatRARtNumeric - Computes the matrix product C = R * A * R^T
8912: Neighbor-wise Collective on Mat
8914: Input Parameters:
8915: + A - the matrix
8916: - R - the projection matrix
8918: Output Parameters:
8919: . C - the product matrix
8921: Notes:
8922:    C must have been created by calling MatRARtSymbolic() and must be destroyed by
8923:    the user using MatDestroy().
8925: This routine is currently only implemented for pairs of AIJ matrices and classes
8926: which inherit from AIJ. C will be of type MATAIJ.
8928: Level: intermediate
8930: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
8931: @*/
8932: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
8933: {
8939: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8940: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8943: MatCheckPreallocated(R,2);
8944: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8945: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8948: MatCheckPreallocated(C,3);
8949: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8950: if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
8951: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
8952: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
8953: if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
8954: MatCheckPreallocated(A,1);
8956: PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
8957: (*A->ops->rartnumeric)(A,R,C);
8958: PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
8959: return(0);
8960: }
8964: /*@
8965: MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T
8967: Neighbor-wise Collective on Mat
8969: Input Parameters:
8970: + A - the matrix
8971: - R - the projection matrix
8973: Output Parameters:
8974: . C - the (i,j) structure of the product matrix
8976: Notes:
8977: C will be created and must be destroyed by the user with MatDestroy().
8979: This routine is currently only implemented for pairs of SeqAIJ matrices and classes
8980: which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using
8981: this (i,j) structure by calling MatRARtNumeric().
8983: Level: intermediate
8985: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
8986: @*/
8987: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
8988: {
8994: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8995: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8996:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
8999: MatCheckPreallocated(R,2);
9000: if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9001: if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9004: if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9005: if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9006: MatCheckPreallocated(A,1);
9007: PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
9008: (*A->ops->rartsymbolic)(A,R,fill,C);
9009: PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);
9011: MatSetBlockSizes(*C,PetscAbs(R->rmap->bs),PetscAbs(R->rmap->bs));
9012: return(0);
9013: }
9017: /*@
9018: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
9020: Neighbor-wise Collective on Mat
9022: Input Parameters:
9023: + A - the left matrix
9024: . B - the right matrix
9025: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9026: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate
9027:           if the result is a dense matrix this is irrelevant
9029: Output Parameters:
9030: . C - the product matrix
9032: Notes:
9033: Unless scall is MAT_REUSE_MATRIX C will be created.
9035: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9037: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9038: actually needed.
9040: If you have many matrices with the same non-zero structure to multiply, you
9041: should either
9042: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9043: $ 2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
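   Example of Usage illustrating option 1 (a minimal sketch; A, B, and C are illustrative names):
.vb
   Mat A,B,C;
   /* ... create and assemble A and B with compatible dimensions ... */
   MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* creates C = A*B */
   /* ... change numerical values of A and/or B, keeping their nonzero patterns ... */
   MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);     /* recomputes C    */
   MatDestroy(&C);
.ve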
9045: Level: intermediate
9047: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP()
9048: @*/
9049: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9050: {
9052: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9053: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9054: PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9059: MatCheckPreallocated(A,1);
9060: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9061: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9064: MatCheckPreallocated(B,2);
9065: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9066: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9068: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9069: if (scall == MAT_REUSE_MATRIX) {
9072: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9073: PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
9074: (*(*C)->ops->matmultnumeric)(A,B,*C);
9075: PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
9076: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9077: return(0);
9078: }
9079: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9080: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9082: fA = A->ops->matmult;
9083: fB = B->ops->matmult;
9084: if (fB == fA) {
9085: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
9086: mult = fB;
9087: } else {
9088: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9089: char multname[256];
9090: PetscStrcpy(multname,"MatMatMult_");
9091: PetscStrcat(multname,((PetscObject)A)->type_name);
9092: PetscStrcat(multname,"_");
9093: PetscStrcat(multname,((PetscObject)B)->type_name);
9094: PetscStrcat(multname,"_C"); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
9095: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9096: if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9097: }
9098: PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9099: (*mult)(A,B,scall,fill,C);
9100: PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9101: return(0);
9102: }
9106: /*@
9107: MatMatMultSymbolic - Performs construction, preallocation, and computes the ij structure
9108: of the matrix-matrix product C=A*B. Call this routine before calling MatMatMultNumeric().
9110: Neighbor-wise Collective on Mat
9112: Input Parameters:
9113: + A - the left matrix
9114: . B - the right matrix
9115: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate,
9116:           if C is a dense matrix this is irrelevant
9118: Output Parameters:
9119: . C - the product matrix
9121: Notes:
9122:    C will be created and must be destroyed by the user with MatDestroy().
9124: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9125: actually needed.
9127: This routine is currently implemented for
9128: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
9129:    - pairs of AIJ (A) and Dense (B) matrices, C will be of type Dense.
9130:    - pairs of Dense (A) and AIJ (B) matrices, C will be of type Dense.
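   Example of Usage (a minimal sketch; A, B, and C are illustrative names):
.vb
   Mat A,B,C;
   /* ... create and assemble A and B with compatible dimensions ... */
   MatMatMultSymbolic(A,B,PETSC_DEFAULT,&C);   /* preallocate C and build its (i,j) structure */
   MatMatMultNumeric(A,B,C);                   /* compute the numerical values of C = A*B     */
   /* ... update values of A and/or B (same nonzero patterns) and call MatMatMultNumeric() again ... */
   MatDestroy(&C);
.ve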
9132: Level: intermediate
9134: Developers Note: There are ways to estimate the number of nonzeros in the resulting product, see for example, http://arxiv.org/abs/1006.4173
9135: We should incorporate them into PETSc.
9137: .seealso: MatMatMult(), MatMatMultNumeric()
9138: @*/
9139: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
9140: {
9142: PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
9143: PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
9144: PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;
9149: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9150: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9154: MatCheckPreallocated(B,2);
9155: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9156: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9159: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9160: if (fill == PETSC_DEFAULT) fill = 2.0;
9161:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9162: MatCheckPreallocated(A,1);
9164: Asymbolic = A->ops->matmultsymbolic;
9165: Bsymbolic = B->ops->matmultsymbolic;
9166: if (Asymbolic == Bsymbolic) {
9167: if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
9168: symbolic = Bsymbolic;
9169: } else { /* dispatch based on the type of A and B */
9170: char symbolicname[256];
9171: PetscStrcpy(symbolicname,"MatMatMultSymbolic_");
9172: PetscStrcat(symbolicname,((PetscObject)A)->type_name);
9173: PetscStrcat(symbolicname,"_");
9174: PetscStrcat(symbolicname,((PetscObject)B)->type_name);
9175: PetscStrcat(symbolicname,"_C");
9176: PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
9177: if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9178: }
9179: PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
9180: (*symbolic)(A,B,fill,C);
9181: PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
9182: return(0);
9183: }
9187: /*@
9188: MatMatMultNumeric - Performs the numeric matrix-matrix product.
9189: Call this routine after first calling MatMatMultSymbolic().
9191: Neighbor-wise Collective on Mat
9193: Input Parameters:
9194: + A - the left matrix
9195: - B - the right matrix
9197: Output Parameters:
9198: .  C - the product matrix, which was created by MatMatMultSymbolic() or a call to MatMatMult().
9200: Notes:
9201: C must have been created with MatMatMultSymbolic().
9203: This routine is currently implemented for
9204: - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
9205:    - pairs of AIJ (A) and Dense (B) matrices, C will be of type MATAIJ.
9206:    - pairs of Dense (A) and AIJ (B) matrices, C will be of type Dense.
9208: Level: intermediate
9210: .seealso: MatMatMult(), MatMatMultSymbolic()
9211: @*/
9212: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
9213: {
9217: MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
9218: return(0);
9219: }
9223: /*@
9224: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
9226: Neighbor-wise Collective on Mat
9228: Input Parameters:
9229: + A - the left matrix
9230: . B - the right matrix
9231: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9232: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9234: Output Parameters:
9235: . C - the product matrix
9237: Notes:
9238: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9240: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9242: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9243: actually needed.
9245:    This routine is currently only implemented for pairs of SeqAIJ matrices. C will be of type MATSEQAIJ.
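   Example of Usage (a minimal sketch; A, B, and C are illustrative names):
.vb
   Mat A,B,C;
   /* ... create and assemble SeqAIJ matrices A and B with the same number of columns ... */
   MatMatTransposeMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* C = A*B^T */
   /* ... use C ... */
   MatDestroy(&C);
.ve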
9247: Level: intermediate
9249: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult() MatPtAP()
9250: @*/
9251: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9252: {
9254: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9255: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9260: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9261: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9264: MatCheckPreallocated(B,2);
9265: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9266: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9268: if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
9269: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9270:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9271: MatCheckPreallocated(A,1);
9273: fA = A->ops->mattransposemult;
9274: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
9275: fB = B->ops->mattransposemult;
9276: if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
9277: if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9279: PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
9280: if (scall == MAT_INITIAL_MATRIX) {
9281: PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
9282: (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
9283: PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
9284: }
9285: PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
9286: (*A->ops->mattransposemultnumeric)(A,B,*C);
9287: PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
9288: PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
9289: return(0);
9290: }
9294: /*@
9295: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
9297: Neighbor-wise Collective on Mat
9299: Input Parameters:
9300: + A - the left matrix
9301: . B - the right matrix
9302: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9303: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9305: Output Parameters:
9306: . C - the product matrix
9308: Notes:
9309: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9311: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9313: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9314: actually needed.
9316: This routine is currently implemented for pairs of AIJ matrices and pairs of SeqDense matrices and classes
9317:    which inherit from SeqAIJ. C will be of the same type as the input matrices.
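   Example of Usage (a minimal sketch; A, B, and C are illustrative names):
.vb
   Mat A,B,C;
   /* ... create and assemble A and B with the same number of rows ... */
   MatTransposeMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* C = A^T*B */
   /* ... use C ... */
   MatDestroy(&C);
.ve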
9319: Level: intermediate
9321: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
9322: @*/
9323: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9324: {
9326: PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9327: PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9328: PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;
9333: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9334: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9337: MatCheckPreallocated(B,2);
9338: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9339: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9341: if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
9342: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9343:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9344: MatCheckPreallocated(A,1);
9346: fA = A->ops->transposematmult;
9347: fB = B->ops->transposematmult;
9348: if (fB==fA) {
9349: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9350: transposematmult = fA;
9351: } else {
9352: /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9353: char multname[256];
9354: PetscStrcpy(multname,"MatTransposeMatMult_");
9355: PetscStrcat(multname,((PetscObject)A)->type_name);
9356: PetscStrcat(multname,"_");
9357: PetscStrcat(multname,((PetscObject)B)->type_name);
9358:     PetscStrcat(multname,"_C"); /* e.g., multname = "MatTransposeMatMult_seqdense_seqaij_C" */
9359: PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9360: if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9361: }
9362: PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9363: (*transposematmult)(A,B,scall,fill,C);
9364: PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9365: return(0);
9366: }
9370: /*@
9371: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9373: Neighbor-wise Collective on Mat
9375: Input Parameters:
9376: + A - the left matrix
9377: . B - the middle matrix
9378: . C - the right matrix
9379: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9380: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B)+nnz(C)), use PETSC_DEFAULT if you do not have a good estimate
9381: if the result is a dense matrix this is irrelevent
9383: Output Parameters:
9384: . D - the product matrix
9386: Notes:
9387: Unless scall is MAT_REUSE_MATRIX D will be created.
9389: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
9391: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9392: actually needed.
9394: If you have many matrices with the same non-zero structure to multiply, you
9395: should either
9396: $ 1) use MAT_REUSE_MATRIX in all calls but the first or
9397: $ 2) call MatMatMatMultSymbolic() once and then MatMatMatMultNumeric() for each product needed
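   Example of Usage illustrating option 1 (a minimal sketch; A, B, C, and D are illustrative names):
.vb
   Mat A,B,C,D;
   /* ... create and assemble A, B, and C with compatible dimensions ... */
   MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);   /* creates D = A*B*C */
   /* ... change numerical values, keeping the nonzero patterns ... */
   MatMatMatMult(A,B,C,MAT_REUSE_MATRIX,PETSC_DEFAULT,&D);     /* recomputes D      */
   MatDestroy(&D);
.ve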
9399: Level: intermediate
9401: .seealso: MatMatMult, MatPtAP()
9402: @*/
9403: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
9404: {
9406: PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9407: PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9408: PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9409: PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9414: MatCheckPreallocated(A,1);
9415: if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9416: if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9419: MatCheckPreallocated(B,2);
9420: if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9421: if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9424: MatCheckPreallocated(C,3);
9425: if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9426: if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9427: if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9428: if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
9429: if (scall == MAT_REUSE_MATRIX) {
9432: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9433: (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
9434: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9435: return(0);
9436: }
9437: if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9438: if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9440: fA = A->ops->matmatmult;
9441: fB = B->ops->matmatmult;
9442: fC = C->ops->matmatmult;
9443: if (fA == fB && fA == fC) {
9444: if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9445: mult = fA;
9446: } else {
9447: /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
9448: char multname[256];
9449: PetscStrcpy(multname,"MatMatMatMult_");
9450: PetscStrcat(multname,((PetscObject)A)->type_name);
9451: PetscStrcat(multname,"_");
9452: PetscStrcat(multname,((PetscObject)B)->type_name);
9453: PetscStrcat(multname,"_");
9454: PetscStrcat(multname,((PetscObject)C)->type_name);
9455: PetscStrcat(multname,"_C");
9456: PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9457: if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
9458: }
9459: PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9460: (*mult)(A,B,C,scall,fill,D);
9461: PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9462: return(0);
9463: }
9467: /*@C
9468: MatCreateRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.
9470: Collective on Mat
9472: Input Parameters:
9473: + mat - the matrix
9474: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
9475: .  subcomm - MPI communicator split from the communicator in which mat resides (or MPI_COMM_NULL if nsubcomm is used)
9476: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9478: Output Parameter:
9479: . matredundant - redundant matrix
9481: Notes:
9482: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
9483:    original matrix has not changed since the last call to MatCreateRedundantMatrix().
9485: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
9486: calling it.
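   Example of Usage (a minimal sketch; mat and Ared are illustrative names; passing MPI_COMM_NULL lets the routine create the subcommunicators):
.vb
   Mat mat,Ared;
   /* ... create and assemble the parallel matrix mat ... */
   MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&Ared);   /* 2 redundant copies */
   /* ... change numerical values of mat, keeping its nonzero structure ... */
   MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_REUSE_MATRIX,&Ared);
   MatDestroy(&Ared);
.ve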
9488: Level: advanced
9490: Concepts: subcommunicator
9491: Concepts: duplicate matrix
9493: .seealso: MatDestroy()
9494: @*/
9495: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
9496: {
9498: MPI_Comm comm;
9499: PetscMPIInt size;
9500: PetscInt mloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
9501: Mat_Redundant *redund=NULL;
9502: PetscSubcomm psubcomm=NULL;
9503: MPI_Comm subcomm_in=subcomm;
9504: Mat *matseq;
9505: IS isrow,iscol;
9506: PetscBool newsubcomm=PETSC_FALSE;
9509: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
9510: if (size == 1 || nsubcomm == 1) {
9511: if (reuse == MAT_INITIAL_MATRIX) {
9512: MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
9513: } else {
9514: MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
9515: }
9516: return(0);
9517: }
9520: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
9523: }
9524: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9525: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9526: MatCheckPreallocated(mat,1);
9528: PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
9529: if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
9530: /* create psubcomm, then get subcomm */
9531: PetscObjectGetComm((PetscObject)mat,&comm);
9532: MPI_Comm_size(comm,&size);
9533:     if (nsubcomm < 1 || nsubcomm > size) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"nsubcomm must be between 1 and %D",size);
9535: PetscSubcommCreate(comm,&psubcomm);
9536: PetscSubcommSetNumber(psubcomm,nsubcomm);
9537: PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
9538: PetscSubcommSetFromOptions(psubcomm);
9539: PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
9540: newsubcomm = PETSC_TRUE;
9541: PetscSubcommDestroy(&psubcomm);
9542: }
9544: /* get isrow, iscol and a local sequential matrix matseq[0] */
9545: if (reuse == MAT_INITIAL_MATRIX) {
9546: mloc_sub = PETSC_DECIDE;
9547: if (bs < 1) {
9548: PetscSplitOwnership(subcomm,&mloc_sub,&M);
9549: } else {
9550: PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
9551: }
9552: MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
9553: rstart = rend - mloc_sub;
9554: ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
9555: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
9556: } else { /* reuse == MAT_REUSE_MATRIX */
9557: /* retrieve subcomm */
9558: PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
9559: redund = (*matredundant)->redundant;
9560: isrow = redund->isrow;
9561: iscol = redund->iscol;
9562: matseq = redund->matseq;
9563: }
9564: MatGetSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);
9566: /* get matredundant over subcomm */
9567: if (reuse == MAT_INITIAL_MATRIX) {
9568: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],mloc_sub,reuse,matredundant);
9570: /* create a supporting struct and attach it to C for reuse */
9571: PetscNewLog(*matredundant,&redund);
9572: (*matredundant)->redundant = redund;
9573: redund->isrow = isrow;
9574: redund->iscol = iscol;
9575: redund->matseq = matseq;
9576: if (newsubcomm) {
9577: redund->subcomm = subcomm;
9578: } else {
9579: redund->subcomm = MPI_COMM_NULL;
9580: }
9581: } else {
9582: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
9583: }
9584: PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
9585: return(0);
9586: }
9590: /*@C
9591: MatGetMultiProcBlock - Create multiple [bjacobi] 'parallel submatrices' from
9592: a given 'mat' object. Each submatrix can span multiple procs.
9594: Collective on Mat
9596: Input Parameters:
9597: + mat - the matrix
9598: .  subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
9599: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9601: Output Parameter:
9602: .  subMat - parallel submatrices, each spanning a given subcomm
9604: Notes:
9605:    The submatrix partition across processors is dictated by 'subComm', a
9606:    communicator obtained by MPI_Comm_split(comm). The split
9607:    is not restricted to grouping consecutive original ranks.
9609:    Because of the MPI_Comm_split() usage, the parallel layout of the submatrices
9610:    maps directly to the layout of the original matrix [with respect to the local
9611:    row,col partitioning]. So the original 'DiagonalMat' naturally maps
9612:    into the 'DiagonalMat' of the subMat, hence it is used directly from
9613:    the subMat. However, the offDiagMat loses some columns, and these are
9614:    reconstructed with MatSetValues()
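   Example of Usage (a minimal sketch; the split into two groups of ranks is only illustrative):
.vb
   Mat mat,subMat;
   MPI_Comm comm,subComm;
   PetscMPIInt rank;
   /* ... create and assemble the parallel matrix mat on comm ... */
   MPI_Comm_rank(comm,&rank);
   MPI_Comm_split(comm,rank%2,rank,&subComm);   /* two subcommunicators */
   MatGetMultiProcBlock(mat,subComm,MAT_INITIAL_MATRIX,&subMat);
   /* ... use subMat, e.g., inside a block Jacobi preconditioner ... */
   MatDestroy(&subMat);
   MPI_Comm_free(&subComm);
.ve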
9616: Level: advanced
9618: Concepts: subcommunicator
9619: Concepts: submatrices
9621: .seealso: MatGetSubMatrices()
9622: @*/
9623: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
9624: {
9626: PetscMPIInt commsize,subCommSize;
9629: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
9630: MPI_Comm_size(subComm,&subCommSize);
9631:   if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);
9633: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
9634: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
9635: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
9636: return(0);
9637: }
9641: /*@
9642: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
9644: Not Collective
9646: Input Arguments:
9647: +  mat - matrix to extract local submatrix from
9648: .  isrow - local row indices for submatrix
9649: -  iscol - local column indices for submatrix
9651:    Output Arguments:
9652: .  submat - the submatrix
9654: Level: intermediate
9656: Notes:
9657: The submat should be returned with MatRestoreLocalSubMatrix().
9659: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
9660: the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.
9662: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
9663: MatSetValuesBlockedLocal() will also be implemented.
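   Example of Usage (a minimal sketch; isrow, iscol, and the inserted value are illustrative):
.vb
   Mat mat,submat;
   IS isrow,iscol;
   PetscInt row = 0,col = 0;
   PetscScalar v = 1.0;
   /* ... create mat with a local-to-global mapping and the local index sets isrow, iscol ... */
   MatGetLocalSubMatrix(mat,isrow,iscol,&submat);
   MatSetValuesLocal(submat,1,&row,1,&col,&v,ADD_VALUES);
   MatRestoreLocalSubMatrix(mat,isrow,iscol,&submat);
   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve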
9665: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef()
9666: @*/
9667: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9668: {
9678: if (mat->ops->getlocalsubmatrix) {
9679: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
9680: } else {
9681: MatCreateLocalRef(mat,isrow,iscol,submat);
9682: }
9683: return(0);
9684: }
9688: /*@
9689: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
9691: Not Collective
9693: Input Arguments:
9694: +  mat - matrix to extract local submatrix from
9695: .  isrow - local row indices for submatrix
9696: .  iscol - local column indices for submatrix
9697: -  submat - the submatrix
9699: Level: intermediate
9701: .seealso: MatGetLocalSubMatrix()
9702: @*/
9703: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9704: {
9713: if (*submat) {
9715: }
9717: if (mat->ops->restorelocalsubmatrix) {
9718: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
9719: } else {
9720: MatDestroy(submat);
9721: }
9722: *submat = NULL;
9723: return(0);
9724: }
9726: /* --------------------------------------------------------*/
9729: /*@
9730: MatFindZeroDiagonals - Finds all the rows of a matrix that have zero or no entry in the matrix
9732: Collective on Mat
9734: Input Parameter:
9735: . mat - the matrix
9737: Output Parameter:
9738: . is - if any rows have zero diagonals this contains the list of them
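   Example of Usage (a minimal sketch):
.vb
   Mat mat;
   IS zerodiag;
   /* ... create and assemble mat ... */
   MatFindZeroDiagonals(mat,&zerodiag);
   ISView(zerodiag,PETSC_VIEWER_STDOUT_WORLD);
   ISDestroy(&zerodiag);
.ve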
9740: Level: developer
9742: Concepts: matrix-vector product
9744: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9745: @*/
9746: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
9747: {
9753: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9754: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9756: if (!mat->ops->findzerodiagonals) {
9757: Vec diag;
9758: const PetscScalar *a;
9759: PetscInt *rows;
9760: PetscInt rStart, rEnd, r, nrow = 0;
9762: MatCreateVecs(mat, &diag, NULL);
9763: MatGetDiagonal(mat, diag);
9764: MatGetOwnershipRange(mat, &rStart, &rEnd);
9765: VecGetArrayRead(diag, &a);
9766: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
9767: PetscMalloc1(nrow, &rows);
9768: nrow = 0;
9769: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
9770: VecRestoreArrayRead(diag, &a);
9771: VecDestroy(&diag);
9772: ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
9773: } else {
9774: (*mat->ops->findzerodiagonals)(mat, is);
9775: }
9776: return(0);
9777: }
9781: /*@
9782: MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)
9784: Collective on Mat
9786: Input Parameter:
9787: . mat - the matrix
9789: Output Parameter:
9790: . is - contains the list of rows with off block diagonal entries
9792: Level: developer
9794: Concepts: matrix-vector product
9796: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9797: @*/
9798: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
9799: {
9805: if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9806: if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9808: if (!mat->ops->findoffblockdiagonalentries) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find off block diagonal entries defined");
9809: (*mat->ops->findoffblockdiagonalentries)(mat,is);
9810: return(0);
9811: }
9815: /*@C
9816: MatInvertBlockDiagonal - Inverts the block diagonal entries.
9818: Collective on Mat
9820: Input Parameters:
9821: . mat - the matrix
9823: Output Parameters:
9824: . values - the block inverses in column major order (FORTRAN-like)
9826: Note:
9827: This routine is not available from Fortran.
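   Example of Usage (a minimal sketch; the returned array is treated as read-only and is not freed by the caller):
.vb
   Mat mat;
   const PetscScalar *values;
   PetscInt bs;
   /* ... create and assemble mat, e.g., a BAIJ matrix ... */
   MatGetBlockSize(mat,&bs);
   MatInvertBlockDiagonal(mat,&values);
   /* values holds the bs*bs inverse of each diagonal block, stored in column major order */
.ve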
9829: Level: advanced
9830: @*/
9831: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
9832: {
9837: if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9838: if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9839: if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
9840: (*mat->ops->invertblockdiagonal)(mat,values);
9841: return(0);
9842: }
9846: /*@C
9847: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
9848: via MatTransposeColoringCreate().
9850: Collective on MatTransposeColoring
9852: Input Parameter:
9853: . c - coloring context
9855: Level: intermediate
9857: .seealso: MatTransposeColoringCreate()
9858: @*/
9859: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
9860: {
9861: PetscErrorCode ierr;
9862: MatTransposeColoring matcolor=*c;
9865: if (!matcolor) return(0);
9866: if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}
9868: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
9869: PetscFree(matcolor->rows);
9870: PetscFree(matcolor->den2sp);
9871: PetscFree(matcolor->colorforcol);
9872: PetscFree(matcolor->columns);
9873: if (matcolor->brows>0) {
9874: PetscFree(matcolor->lstart);
9875: }
9876: PetscHeaderDestroy(c);
9877: return(0);
9878: }
9882: /*@C
9883: MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
9884:    a MatTransposeColoring context has been created, computes a dense B^T by applying
9885:    the MatTransposeColoring to the sparse B.
9887: Collective on MatTransposeColoring
9889: Input Parameters:
9890: + B - sparse matrix B
9891: . Btdense - symbolic dense matrix B^T
9892: - coloring - coloring context created with MatTransposeColoringCreate()
9894: Output Parameter:
9895: . Btdense - dense matrix B^T
9897: Options Database Keys:
9898: +  -mat_transpose_coloring_view - Activates basic viewing of coloring
9899: . -mat_transpose_coloring_view_draw - Activates drawing of coloring
9900: - -mat_transpose_coloring_view_info - Activates viewing of coloring info
9902: Level: intermediate
9904: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy()
9906: .keywords: coloring
9907: @*/
9908: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
9909: {
9917: if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
9918: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
9919: return(0);
9920: }
9924: /*@C
9925: MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
9926: a MatTransposeColoring context has been created and a dense matrix Cden=A*Btdense
9927:    in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse matrix
9928: Csp from Cden.
9930: Collective on MatTransposeColoring
9932: Input Parameters:
9933: + coloring - coloring context created with MatTransposeColoringCreate()
9934: - Cden - matrix product of a sparse matrix and a dense matrix Btdense
9936: Output Parameter:
9937: . Csp - sparse matrix
9939: Options Database Keys:
9940: +  -mat_multtranspose_coloring_view - Activates basic viewing of coloring
9941: . -mat_multtranspose_coloring_view_draw - Activates drawing of coloring
9942: - -mat_multtranspose_coloring_view_info - Activates viewing of coloring info
9944: Level: intermediate
9946: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
9948: .keywords: coloring
9949: @*/
9950: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
9951: {
9959: if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
9960: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
9961: return(0);
9962: }
9966: /*@C
9967: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
9969: Collective on Mat
9971: Input Parameters:
9972: + mat - the matrix product C
9973: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
9975: Output Parameter:
9976: . color - the new coloring context
9978: Level: intermediate
9980: .seealso: MatTransposeColoringDestroy(), MatTransposeColoringSetFromOptions(), MatTransColoringApplySpToDen(),
9981:            MatTransColoringApplyDenToSp(), MatTransposeColoringView()
9982: @*/
9983: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
9984: {
9985: MatTransposeColoring c;
9986: MPI_Comm comm;
9987: PetscErrorCode ierr;
9990: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
9991: PetscObjectGetComm((PetscObject)mat,&comm);
9992: PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);
9994: c->ctype = iscoloring->ctype;
9995: if (mat->ops->transposecoloringcreate) {
9996: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
9997: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");
9999: *color = c;
10000: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10001: return(0);
10002: }
10006: /*@
10007: MatGetNonzeroState - Returns a 64 bit integer representing the current state of nonzeros in the matrix. If the
10008:    matrix has had no new nonzero locations added since the previous call, then the value will be the
10009:    same; otherwise it will be larger
10011: Not Collective
10013: Input Parameter:
10014: . A - the matrix
10016: Output Parameter:
10017: . state - the current state
10019:   Notes: You can only compare states from two different calls on the SAME matrix; you cannot compare states from calls on
10020:          different matrices
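   Example of Usage (a minimal sketch comparing two states of the same matrix):
.vb
   Mat mat;
   PetscObjectState state1,state2;
   MatGetNonzeroState(mat,&state1);
   /* ... insert values into mat and assemble it ... */
   MatGetNonzeroState(mat,&state2);
   if (state2 > state1) {
     /* new nonzero locations were introduced */
   }
.ve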
10022: Level: intermediate
10024: @*/
10025: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10026: {
10029: *state = mat->nonzerostate;
10030: return(0);
10031: }
10035: /*@
10036: MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10037: matrices from each processor
10039: Collective on MPI_Comm
10041: Input Parameters:
10042: +  comm - the communicator the parallel matrix will live on
10043: .  seqmat - the input sequential matrix (one per process)
10044: . n - number of local columns (or PETSC_DECIDE)
10045: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10047: Output Parameter:
10048: . mpimat - the parallel matrix generated
10050: Level: advanced
10052: Notes: The number of columns of the matrix in EACH processor MUST be the same.
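   Example of Usage (a minimal sketch; each rank contributes its own sequential matrix seqmat):
.vb
   Mat seqmat,mpimat;
   /* ... create and assemble the local sequential matrix seqmat on every rank ... */
   MatCreateMPIMatConcatenateSeqMat(PETSC_COMM_WORLD,seqmat,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
   /* ... use mpimat ... */
   MatDestroy(&mpimat);
.ve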
10054: @*/
10055: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10056: {
10058: PetscMPIInt size;
10061: MPI_Comm_size(comm,&size);
10062: if (size == 1) {
10063: if (reuse == MAT_INITIAL_MATRIX) {
10064: MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
10065: } else {
10066: MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
10067: }
10068: return(0);
10069: }
10071: if (!seqmat->ops->creatempimatconcatenateseqmat) SETERRQ1(PetscObjectComm((PetscObject)seqmat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)seqmat)->type_name);
10072: PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10073: (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10074: PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10075: return(0);
10076: }
10080: /*@
10081: MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10082: ranks' ownership ranges.
10084: Collective on A
10086: Input Parameters:
10087: + A - the matrix to create subdomains from
10088: - N - requested number of subdomains
10091: Output Parameters:
10092: + n - number of subdomains resulting on this rank
10093: - iss - IS list with indices of subdomains on this rank
10095: Level: advanced
10097: Notes: number of subdomains must be smaller than the communicator size
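   Example of Usage (a minimal sketch; run on more MPI ranks than requested subdomains, and the subdomains would typically be handed to a domain decomposition preconditioner):
.vb
   Mat A;
   PetscInt i,n,N = 2;
   IS *iss;
   /* ... create and assemble the parallel matrix A on at least 3 ranks ... */
   MatSubdomainsCreateCoalesce(A,N,&n,&iss);
   /* ... pass (n,iss) to, e.g., PCGASMSetSubdomains() ... */
   for (i=0; i<n; i++) {ISDestroy(&iss[i]);}
   PetscFree(iss);
.ve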
10098: @*/
10099: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10100: {
10101: MPI_Comm comm,subcomm;
10102: PetscMPIInt size,rank,color,subsize,subrank;
10103: PetscInt rstart,rend,k;
10104: PetscErrorCode ierr;
10107: PetscObjectGetComm((PetscObject)A,&comm);
10108: MPI_Comm_size(comm,&size);
10109: MPI_Comm_rank(comm,&rank);
10110: if (N < 1 || N >= (PetscInt)size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and < %D, got N = %D",size,N);
10111: *n = 1;
10112: k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10113: color = rank/k;
10114: MPI_Comm_split(comm,color,rank,&subcomm);
10115: MPI_Comm_size(subcomm,&subsize);
10116:   MPI_Comm_rank(subcomm,&subrank);
10117: PetscMalloc1(1,iss);
10118: MatGetOwnershipRange(A,&rstart,&rend);
10119: ISCreateStride(subcomm,rend-rstart,rstart,1,*iss);
10120: return(0);
10121: }