Actual source code: matrix.c
1: /*
2: This is where the abstract matrix operations are defined
3: */
5: #include <petsc/private/matimpl.h>
6: #include <petsc/private/isimpl.h>
7: #include <petsc/private/vecimpl.h>
9: /* Logging support */
10: PetscClassId MAT_CLASSID;
11: PetscClassId MAT_COLORING_CLASSID;
12: PetscClassId MAT_FDCOLORING_CLASSID;
13: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;
15: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultAdd, MAT_MultTranspose;
16: PetscLogEvent MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve,MAT_MatTrSolve;
17: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
18: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
19: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
20: PetscLogEvent MAT_QRFactorNumeric, MAT_QRFactorSymbolic, MAT_QRFactor;
21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_CreateSubMats, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_PartitioningND, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_CreateSubMat;
24: PetscLogEvent MAT_TransposeColoringCreate;
25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_GetBrowsOfAcols;
32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
34: PetscLogEvent MAT_GetMultiProcBlock;
35: PetscLogEvent MAT_CUSPARSECopyToGPU, MAT_CUSPARSECopyFromGPU, MAT_CUSPARSEGenerateTranspose, MAT_CUSPARSESolveAnalysis;
36: PetscLogEvent MAT_PreallCOO, MAT_SetVCOO;
37: PetscLogEvent MAT_SetValuesBatch;
38: PetscLogEvent MAT_ViennaCLCopyToGPU;
39: PetscLogEvent MAT_DenseCopyToGPU, MAT_DenseCopyFromGPU;
40: PetscLogEvent MAT_Merge,MAT_Residual,MAT_SetRandom;
41: PetscLogEvent MAT_FactorFactS,MAT_FactorInvS;
42: PetscLogEvent MATCOLORING_Apply,MATCOLORING_Comm,MATCOLORING_Local,MATCOLORING_ISCreate,MATCOLORING_SetUp,MATCOLORING_Weights;
43: PetscLogEvent MAT_H2Opus_Build,MAT_H2Opus_Compress,MAT_H2Opus_Orthog,MAT_H2Opus_LR;
45: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","QR","MatFactorType","MAT_FACTOR_",NULL};
47: /*@
48: MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated but not yet assembled, it randomly selects appropriate locations;
49: for sparse matrices that already have nonzero locations, it fills those locations with random numbers.
51: Logically Collective on Mat
53: Input Parameters:
54: + x - the matrix
55: - rctx - the random number context, formed by PetscRandomCreate(), or NULL,
56: in which case one is created internally.
58: Output Parameter:
59: . x - the matrix
61: Example of Usage:
62: .vb
63: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
64: MatSetRandom(x,rctx);
65: PetscRandomDestroy(rctx);
66: .ve
68: Level: intermediate
70: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
71: @*/
72: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
73: {
74: PetscRandom randObj = NULL;
79: MatCheckPreallocated(x,1);
83: if (!rctx) {
84: MPI_Comm comm;
85: PetscObjectGetComm((PetscObject)x,&comm);
86: PetscRandomCreate(comm,&randObj);
87: PetscRandomSetFromOptions(randObj);
88: rctx = randObj;
89: }
90: PetscLogEventBegin(MAT_SetRandom,x,rctx,0,0);
91: (*x->ops->setrandom)(x,rctx);
92: PetscLogEventEnd(MAT_SetRandom,x,rctx,0,0);
94: MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);
95: MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);
96: PetscRandomDestroy(&randObj);
97: return 0;
98: }
100: /*@
101: MatFactorGetErrorZeroPivot - returns the pivot value that was determined to be zero and the row it occurred in
103: Logically Collective on Mat
105: Input Parameter:
106: . mat - the factored matrix
108: Output Parameters:
109: + pivot - the pivot value computed
110: - row - the row in which the zero pivot occurred. Note that this row must be interpreted carefully due to row reorderings and to which processes
111: share the matrix
113: Level: advanced
115: Notes:
116: This routine does not work for factorizations done with external packages.
118: This routine should only be called if MatGetFactorError() returns a value of MAT_FACTOR_NUMERIC_ZEROPIVOT
120: This can also be called on non-factored matrices, for example matrices used in SOR.
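Example of Usage:
A minimal sketch (variable names are illustrative; error checking omitted), assuming F is a factored matrix whose numeric factorization has just been attempted:
.vb
MatFactorError err;
PetscReal      pivot;
PetscInt       row;
MatFactorGetError(F,&err);
if (err == MAT_FACTOR_NUMERIC_ZEROPIVOT) {
  MatFactorGetErrorZeroPivot(F,&pivot,&row);
  PetscPrintf(PETSC_COMM_WORLD,"zero pivot %g in row %" PetscInt_FMT "\n",(double)pivot,row);
}
.ve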
122: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatLUFactorSymbolic(), MatCholeskyFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
123: @*/
124: PetscErrorCode MatFactorGetErrorZeroPivot(Mat mat,PetscReal *pivot,PetscInt *row)
125: {
129: *pivot = mat->factorerror_zeropivot_value;
130: *row = mat->factorerror_zeropivot_row;
131: return 0;
132: }
134: /*@
135: MatFactorGetError - gets the error code from a factorization
137: Logically Collective on Mat
139: Input Parameter:
140: . mat - the factored matrix
142: Output Parameter:
143: . err - the error code
145: Level: advanced
147: Notes:
148: This can also be called on non-factored matrices, for example matrices used in SOR.
150: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatLUFactorSymbolic(), MatCholeskyFactorSymbolic(), MatFactorClearError(), MatFactorGetErrorZeroPivot()
151: @*/
152: PetscErrorCode MatFactorGetError(Mat mat,MatFactorError *err)
153: {
156: *err = mat->factorerrortype;
157: return 0;
158: }
160: /*@
161: MatFactorClearError - clears the error code in a factorization
163: Logically Collective on Mat
165: Input Parameter:
166: . mat - the factored matrix
168: Level: developer
170: Notes:
171: This can also be called on non-factored matrices, for example matrices used in SOR.
173: .seealso: MatZeroEntries(), MatFactor(), MatGetFactor(), MatLUFactorSymbolic(), MatCholeskyFactorSymbolic(), MatFactorGetError(), MatFactorGetErrorZeroPivot()
174: @*/
175: PetscErrorCode MatFactorClearError(Mat mat)
176: {
178: mat->factorerrortype = MAT_FACTOR_NOERROR;
179: mat->factorerror_zeropivot_value = 0.0;
180: mat->factorerror_zeropivot_row = 0;
181: return 0;
182: }
184: PETSC_INTERN PetscErrorCode MatFindNonzeroRowsOrCols_Basic(Mat mat,PetscBool cols,PetscReal tol,IS *nonzero)
185: {
186: Vec r,l;
187: const PetscScalar *al;
188: PetscInt i,nz,gnz,N,n;
190: MatCreateVecs(mat,&r,&l);
191: if (!cols) { /* nonzero rows */
192: MatGetSize(mat,&N,NULL);
193: MatGetLocalSize(mat,&n,NULL);
194: VecSet(l,0.0);
195: VecSetRandom(r,NULL);
196: MatMult(mat,r,l);
197: VecGetArrayRead(l,&al);
198: } else { /* nonzero columns */
199: MatGetSize(mat,NULL,&N);
200: MatGetLocalSize(mat,NULL,&n);
201: VecSet(r,0.0);
202: VecSetRandom(l,NULL);
203: MatMultTranspose(mat,l,r);
204: VecGetArrayRead(r,&al);
205: }
206: if (tol <= 0.0) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nz++; }
207: else { for (i=0,nz=0;i<n;i++) if (PetscAbsScalar(al[i]) > tol) nz++; }
208: MPIU_Allreduce(&nz,&gnz,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)mat));
209: if (gnz != N) {
210: PetscInt *nzr;
211: PetscMalloc1(nz,&nzr);
212: if (nz) {
213: if (tol < 0) { for (i=0,nz=0;i<n;i++) if (al[i] != 0.0) nzr[nz++] = i; }
214: else { for (i=0,nz=0;i<n;i++) if (PetscAbsScalar(al[i]) > tol) nzr[nz++] = i; }
215: }
216: ISCreateGeneral(PetscObjectComm((PetscObject)mat),nz,nzr,PETSC_OWN_POINTER,nonzero);
217: } else *nonzero = NULL;
218: if (!cols) { /* nonzero rows */
219: VecRestoreArrayRead(l,&al);
220: } else {
221: VecRestoreArrayRead(r,&al);
222: }
223: VecDestroy(&l);
224: VecDestroy(&r);
225: return 0;
226: }
228: /*@
229: MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix
231: Input Parameter:
232: . A - the matrix
234: Output Parameter:
235: . keptrows - the rows that are not completely zero
237: Notes:
238: keptrows is set to NULL if all rows are nonzero.
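Example of Usage:
A minimal sketch (A is an assembled matrix on PETSC_COMM_WORLD; the names are illustrative and error checking is omitted):
.vb
IS keptrows;
MatFindNonzeroRows(A,&keptrows);
if (keptrows) {                    /* NULL means every row has a nonzero entry */
  ISView(keptrows,PETSC_VIEWER_STDOUT_WORLD);
  ISDestroy(&keptrows);
}
.ve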
240: Level: intermediate
242: @*/
243: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
244: {
250: if (mat->ops->findnonzerorows) {
251: (*mat->ops->findnonzerorows)(mat,keptrows);
252: } else {
253: MatFindNonzeroRowsOrCols_Basic(mat,PETSC_FALSE,0.0,keptrows);
254: }
255: return 0;
256: }
258: /*@
259: MatFindZeroRows - Locate all rows that are completely zero in the matrix
261: Input Parameter:
262: . A - the matrix
264: Output Parameter:
265: . zerorows - the rows that are completely zero
267: Notes:
268: zerorows is set to NULL if no rows are zero.
270: Level: intermediate
272: @*/
273: PetscErrorCode MatFindZeroRows(Mat mat,IS *zerorows)
274: {
275: IS keptrows;
276: PetscInt m, n;
281: MatFindNonzeroRows(mat, &keptrows);
282: /* MatFindNonzeroRows sets keptrows to NULL if there are no zero rows.
283: In keeping with this convention, we set zerorows to NULL if there are no zero
284: rows. */
285: if (keptrows == NULL) {
286: *zerorows = NULL;
287: } else {
288: MatGetOwnershipRange(mat,&m,&n);
289: ISComplement(keptrows,m,n,zerorows);
290: ISDestroy(&keptrows);
291: }
292: return 0;
293: }
295: /*@
296: MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling
298: Not Collective
300: Input Parameter:
301: . A - the matrix
303: Output Parameter:
304: . a - the diagonal part (which is a SEQUENTIAL matrix)
306: Notes:
307: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
308: Use caution, as the reference count on the returned matrix is not incremented and it is used as
309: part of the containing MPI Mat's normal operation.
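Example of Usage:
A minimal sketch (A denotes an assembled MPI matrix; error checking omitted):
.vb
Mat Ad;
MatGetDiagonalBlock(A,&Ad);
MatView(Ad,PETSC_VIEWER_STDOUT_SELF);  /* do not destroy Ad; its reference count was not incremented */
.ve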
311: Level: advanced
313: @*/
314: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
315: {
320: if (A->ops->getdiagonalblock) {
321: (*A->ops->getdiagonalblock)(A,a);
322: } else {
323: PetscMPIInt size;
325: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
327: *a = A;
328: }
329: return 0;
330: }
332: /*@
333: MatGetTrace - Gets the trace of a matrix, that is, the sum of its diagonal entries.
335: Collective on Mat
337: Input Parameter:
338: . mat - the matrix
340: Output Parameter:
341: . trace - the sum of the diagonal entries
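Example of Usage:
A minimal sketch (A denotes an assembled square matrix; error checking omitted):
.vb
PetscScalar trace;
MatGetTrace(A,&trace);
PetscPrintf(PetscObjectComm((PetscObject)A),"trace = %g\n",(double)PetscRealPart(trace));
.ve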
343: Level: advanced
345: @*/
346: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
347: {
348: Vec diag;
352: MatCreateVecs(mat,&diag,NULL);
353: MatGetDiagonal(mat,diag);
354: VecSum(diag,trace);
355: VecDestroy(&diag);
356: return 0;
357: }
359: /*@
360: MatRealPart - Zeros out the imaginary part of the matrix
362: Logically Collective on Mat
364: Input Parameter:
365: . mat - the matrix
367: Level: advanced
369: .seealso: MatImaginaryPart()
370: @*/
371: PetscErrorCode MatRealPart(Mat mat)
372: {
378: MatCheckPreallocated(mat,1);
379: (*mat->ops->realpart)(mat);
380: return 0;
381: }
383: /*@C
384: MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix
386: Collective on Mat
388: Input Parameter:
389: . mat - the matrix
391: Output Parameters:
392: + nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
393: - ghosts - the global indices of the ghost points
395: Notes:
396: The nghosts and ghosts are suitable to pass into VecCreateGhost().
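Example of Usage:
A minimal sketch creating a compatible ghosted vector (the use of the local column size for n here is illustrative; error checking omitted):
.vb
PetscInt       n,nghosts;
const PetscInt *ghosts;
Vec            x;
MatGetLocalSize(A,NULL,&n);
MatGetGhosts(A,&nghosts,&ghosts);
VecCreateGhost(PetscObjectComm((PetscObject)A),n,PETSC_DECIDE,nghosts,ghosts,&x);
.ve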
398: Level: advanced
400: @*/
401: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
402: {
407: if (mat->ops->getghosts) {
408: (*mat->ops->getghosts)(mat,nghosts,ghosts);
409: } else {
410: if (nghosts) *nghosts = 0;
411: if (ghosts) *ghosts = NULL;
412: }
413: return 0;
414: }
416: /*@
417: MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part
419: Logically Collective on Mat
421: Input Parameter:
422: . mat - the matrix
424: Level: advanced
426: .seealso: MatRealPart()
427: @*/
428: PetscErrorCode MatImaginaryPart(Mat mat)
429: {
435: MatCheckPreallocated(mat,1);
436: (*mat->ops->imaginarypart)(mat);
437: return 0;
438: }
440: /*@
441: MatMissingDiagonal - Determine whether a sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)
443: Not Collective
445: Input Parameter:
446: . mat - the matrix
448: Output Parameters:
449: + missing - is any diagonal entry missing
450: - dd - the index of the first missing diagonal entry on this process (optional)
452: Level: advanced
454: .seealso: MatRealPart()
455: @*/
456: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
457: {
464: (*mat->ops->missingdiagonal)(mat,missing,dd);
465: return 0;
466: }
468: /*@C
469: MatGetRow - Gets a row of a matrix. You MUST call MatRestoreRow()
470: for each row that you get to ensure that your application does
471: not bleed memory.
473: Not Collective
475: Input Parameters:
476: + mat - the matrix
477: - row - the row to get
479: Output Parameters:
480: + ncols - if not NULL, the number of nonzeros in the row
481: . cols - if not NULL, the column numbers
482: - vals - if not NULL, the values
484: Notes:
485: This routine is provided for people who need to have direct access
486: to the structure of a matrix. We hope that we provide enough
487: high-level matrix routines that few users will need it.
489: MatGetRow() always returns 0-based column indices, regardless of
490: whether the internal representation is 0-based (default) or 1-based.
492: For better efficiency, set cols and/or vals to NULL if you do
493: not wish to extract these quantities.
495: The user can only examine the values extracted with MatGetRow();
496: the values cannot be altered. To change the matrix entries, one
497: must use MatSetValues().
499: You can only have one call to MatGetRow() outstanding for a particular
500: matrix at a time, per processor. MatGetRow() can only obtain rows
501: associated with the given processor, it cannot get rows from the
502: other processors; for that we suggest using MatCreateSubMatrices(), then
503: MatGetRow() on the submatrix. The row index passed to MatGetRow()
504: is in the global numbering of rows.
506: Fortran Notes:
507: The calling sequence from Fortran is
508: .vb
509: MatGetRow(matrix,row,ncols,cols,values,ierr)
510: Mat matrix (input)
511: integer row (input)
512: integer ncols (output)
513: integer cols(maxcols) (output)
514: double precision (or double complex) values(maxcols) (output)
515: .ve
516: where maxcols >= maximum nonzeros in any row of the matrix.
518: Caution:
519: Do not try to change the contents of the output arrays (cols and vals).
520: In some cases, this may corrupt the matrix.
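Example of Usage:
A minimal sketch looping over the locally owned rows (A denotes an assembled matrix; error checking omitted):
.vb
PetscInt          rstart,rend,row,ncols;
const PetscInt    *cols;
const PetscScalar *vals;
MatGetOwnershipRange(A,&rstart,&rend);
for (row=rstart; row<rend; row++) {
  MatGetRow(A,row,&ncols,&cols,&vals);
  /* examine ncols, cols[], and vals[] here; do not modify them */
  MatRestoreRow(A,row,&ncols,&cols,&vals);
}
.ve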
522: Level: advanced
524: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatCreateSubMatrices(), MatGetDiagonal()
525: @*/
526: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
527: {
528: PetscInt incols;
535: MatCheckPreallocated(mat,1);
537: PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
538: (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
539: if (ncols) *ncols = incols;
540: PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
541: return 0;
542: }
544: /*@
545: MatConjugate - replaces the matrix values with their complex conjugates
547: Logically Collective on Mat
549: Input Parameter:
550: . mat - the matrix
552: Level: advanced
554: .seealso: VecConjugate()
555: @*/
556: PetscErrorCode MatConjugate(Mat mat)
557: {
560: if (PetscDefined(USE_COMPLEX)) {
562: (*mat->ops->conjugate)(mat);
563: }
564: return 0;
565: }
567: /*@C
568: MatRestoreRow - Frees any temporary space allocated by MatGetRow().
570: Not Collective
572: Input Parameters:
573: + mat - the matrix
574: . row - the row to get
575: . ncols, cols - the number of nonzeros and their column indices
576: - vals - if not NULL, the values
578: Notes:
579: This routine should be called after you have finished examining the entries.
581: This routine zeros out ncols, cols, and vals. This is to prevent accidental
582: use of the arrays after they have been restored. If you pass NULL, it will
583: not zero the pointers. Use of cols or vals after MatRestoreRow() is invalid.
585: Fortran Notes:
586: The calling sequence from Fortran is
587: .vb
588: MatRestoreRow(matrix,row,ncols,cols,values,ierr)
589: Mat matrix (input)
590: integer row (input)
591: integer ncols (output)
592: integer cols(maxcols) (output)
593: double precision (or double complex) values(maxcols) (output)
594: .ve
595: Where maxcols >= maximum nonzeros in any row of the matrix.
597: In Fortran MatRestoreRow() MUST be called after MatGetRow()
598: before another call to MatGetRow() can be made.
600: Level: advanced
602: .seealso: MatGetRow()
603: @*/
604: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
605: {
609: if (!mat->ops->restorerow) return 0;
610: (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
611: if (ncols) *ncols = 0;
612: if (cols) *cols = NULL;
613: if (vals) *vals = NULL;
614: return 0;
615: }
617: /*@
618: MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrices in MATSBAIJ format.
619: You should call MatRestoreRowUpperTriangular() after calling MatGetRow()/MatRestoreRow() to disable the flag.
621: Not Collective
623: Input Parameter:
624: . mat - the matrix
626: Notes:
627: The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.
629: Level: advanced
631: .seealso: MatRestoreRowUpperTriangular()
632: @*/
633: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
634: {
639: MatCheckPreallocated(mat,1);
640: if (!mat->ops->getrowuppertriangular) return 0;
641: (*mat->ops->getrowuppertriangular)(mat);
642: return 0;
643: }
645: /*@
646: MatRestoreRowUpperTriangular - Disables calls to MatGetRow() for matrices in MATSBAIJ format.
648: Not Collective
650: Input Parameter:
651: . mat - the matrix
653: Notes:
654: This routine should be called after you have finished calling MatGetRow()/MatRestoreRow().
656: Level: advanced
658: .seealso: MatGetRowUpperTriangular()
659: @*/
660: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
661: {
666: MatCheckPreallocated(mat,1);
667: if (!mat->ops->restorerowuppertriangular) return 0;
668: (*mat->ops->restorerowuppertriangular)(mat);
669: return 0;
670: }
672: /*@C
673: MatSetOptionsPrefix - Sets the prefix used for searching for all
674: Mat options in the database.
676: Logically Collective on Mat
678: Input Parameters:
679: + A - the Mat context
680: - prefix - the prefix to prepend to all option names
682: Notes:
683: A hyphen (-) must NOT be given at the beginning of the prefix name.
684: The first character of all runtime options is AUTOMATICALLY the hyphen.
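Example of Usage:
A minimal sketch (the prefix "sys1_" and the options shown are purely illustrative):
.vb
MatSetOptionsPrefix(A,"sys1_");
MatSetFromOptions(A);
/* the matrix now responds to options such as -sys1_mat_type or -sys1_mat_view */
.ve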
686: Level: advanced
688: .seealso: MatSetFromOptions()
689: @*/
690: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
691: {
693: PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
694: return 0;
695: }
697: /*@C
698: MatAppendOptionsPrefix - Appends to the prefix used for searching for all
699: Mat options in the database.
701: Logically Collective on Mat
703: Input Parameters:
704: + A - the Mat context
705: - prefix - the string to append to the current prefix used for all option names
707: Notes:
708: A hyphen (-) must NOT be given at the beginning of the prefix name.
709: The first character of all runtime options is AUTOMATICALLY the hyphen.
711: Level: advanced
713: .seealso: MatGetOptionsPrefix()
714: @*/
715: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
716: {
718: PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
719: return 0;
720: }
722: /*@C
723: MatGetOptionsPrefix - Gets the prefix used for searching for all
724: Mat options in the database.
726: Not Collective
728: Input Parameter:
729: . A - the Mat context
731: Output Parameter:
732: . prefix - pointer to the prefix string used
734: Notes:
735: On the Fortran side, the user should pass in a string 'prefix' of
736: sufficient length to hold the prefix.
738: Level: advanced
740: .seealso: MatAppendOptionsPrefix()
741: @*/
742: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
743: {
746: PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
747: return 0;
748: }
750: /*@
751: MatResetPreallocation - Resets the matrix to use the original nonzero pattern provided by the user.
753: Collective on Mat
755: Input Parameter:
756: . A - the Mat context
758: Notes:
759: The allocated memory will be shrunk after calling MatAssemblyBegin()/MatAssemblyEnd() with MAT_FINAL_ASSEMBLY. Users can reset the preallocation to access the original memory.
760: Currently supported for MATMPIAIJ and MATSEQAIJ matrices.
762: Level: beginner
764: .seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatXAIJSetPreallocation()
765: @*/
766: PetscErrorCode MatResetPreallocation(Mat A)
767: {
770: PetscUseMethod(A,"MatResetPreallocation_C",(Mat),(A));
771: return 0;
772: }
774: /*@
775: MatSetUp - Sets up the internal matrix data structures for later use.
777: Collective on Mat
779: Input Parameter:
780: . A - the Mat context
782: Notes:
783: If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.
785: If a suitable preallocation routine is used, this function does not need to be called.
787: See the Performance chapter of the PETSc users manual for how to preallocate matrices.
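Example of Usage:
A minimal sketch (error checking omitted; in performance-critical code replace MatSetUp() with an appropriate preallocation routine):
.vb
Mat A;
MatCreate(PETSC_COMM_WORLD,&A);
MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
MatSetFromOptions(A);
MatSetUp(A);          /* MatSetValues() may now be called */
.ve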
789: Level: beginner
791: .seealso: MatCreate(), MatDestroy()
792: @*/
793: PetscErrorCode MatSetUp(Mat A)
794: {
796: if (!((PetscObject)A)->type_name) {
797: PetscMPIInt size;
799: MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
800: MatSetType(A, size == 1 ? MATSEQAIJ : MATMPIAIJ);
801: }
802: if (!A->preallocated && A->ops->setup) {
803: PetscInfo(A,"Warning not preallocating matrix storage\n");
804: (*A->ops->setup)(A);
805: }
806: PetscLayoutSetUp(A->rmap);
807: PetscLayoutSetUp(A->cmap);
808: A->preallocated = PETSC_TRUE;
809: return 0;
810: }
812: #if defined(PETSC_HAVE_SAWS)
813: #include <petscviewersaws.h>
814: #endif
816: /*@C
817: MatViewFromOptions - Views the matrix based on options in the options database
819: Collective on Mat
821: Input Parameters:
822: + A - the Mat context
823: . obj - Optional object
824: - name - command line option
826: Level: intermediate
827: .seealso: Mat, MatView(), PetscObjectViewFromOptions(), MatCreate()
828: @*/
829: PetscErrorCode MatViewFromOptions(Mat A,PetscObject obj,const char name[])
830: {
832: PetscObjectViewFromOptions((PetscObject)A,obj,name);
833: return 0;
834: }
836: /*@C
837: MatView - Visualizes a matrix object.
839: Collective on Mat
841: Input Parameters:
842: + mat - the matrix
843: - viewer - visualization context
845: Notes:
846: The available visualization contexts include
847: + PETSC_VIEWER_STDOUT_SELF - for sequential matrices
848: . PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
849: . PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
850: - PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure
852: The user can open alternative visualization contexts with
853: + PetscViewerASCIIOpen() - Outputs matrix to a specified file
854: . PetscViewerBinaryOpen() - Outputs matrix in binary to a
855: specified file; corresponding input uses MatLoad()
856: . PetscViewerDrawOpen() - Outputs nonzero matrix structure to
857: an X window display
858: - PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
859: Currently only the sequential dense and AIJ
860: matrix types support the Socket viewer.
862: The user can call PetscViewerPushFormat() to specify the output
863: format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
864: PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen). Available formats include
865: + PETSC_VIEWER_DEFAULT - default, prints matrix contents
866: . PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
867: . PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
868: . PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
869: format common among all matrix types
870: . PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
871: format (which is in many cases the same as the default)
872: . PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
873: size and structure (not the matrix entries)
874: - PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
875: the matrix structure
877: Options Database Keys:
878: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
879: . -mat_view ::ascii_info_detail - Prints more detailed info
880: . -mat_view - Prints matrix in ASCII format
881: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
882: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
883: . -display <name> - Sets display name (default is host)
884: . -draw_pause <sec> - Sets number of seconds to pause after display
885: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: ch_matlab for details)
886: . -viewer_socket_machine <machine> -
887: . -viewer_socket_port <port> -
888: . -mat_view binary - save matrix to file in binary format
889: - -viewer_binary_filename <name> -
891: Level: beginner
893: Notes:
894: The ASCII viewers are only recommended for small matrices on at most a moderate number of processes;
895: for larger matrices the program will seemingly hang and take hours, so one should use the binary format instead.
897: In the debugger you can do "call MatView(mat,0)" to display the matrix. (The same holds for any PETSc object viewer).
899: See the manual page for MatLoad() for the exact format of the binary file when the binary
900: viewer is used.
902: See share/petsc/matlab/PetscBinaryRead.m for MATLAB code that can read in the binary file when the binary
903: viewer is used, and lib/petsc/bin/PetscBinaryIO.py for loading it into Python.
905: One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure,
906: and then use the following mouse functions.
907: .vb
908: left mouse: zoom in
909: middle mouse: zoom out
910: right mouse: continue with the simulation
911: .ve
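Example of Usage:
A minimal sketch writing a matrix in MATLAB format to a file (the file name "A.m" is illustrative; error checking omitted):
.vb
PetscViewer viewer;
PetscViewerASCIIOpen(PETSC_COMM_WORLD,"A.m",&viewer);
PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);
MatView(A,viewer);
PetscViewerPopFormat(viewer);
PetscViewerDestroy(&viewer);
.ve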
913: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
914: PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
915: @*/
916: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
917: {
918: PetscInt rows,cols,rbs,cbs;
919: PetscBool isascii,isstring,issaws;
920: PetscViewerFormat format;
921: PetscMPIInt size;
925: if (!viewer) PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
928: MatCheckPreallocated(mat,1);
930: PetscViewerGetFormat(viewer,&format);
931: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
932: if (size == 1 && format == PETSC_VIEWER_LOAD_BALANCE) return 0;
934: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);
935: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
936: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
937: if ((!isascii || (format != PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
938: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detail");
939: }
941: PetscLogEventBegin(MAT_View,mat,viewer,0,0);
942: if (isascii) {
944: PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
945: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
946: MatNullSpace nullsp,transnullsp;
948: PetscViewerASCIIPushTab(viewer);
949: MatGetSize(mat,&rows,&cols);
950: MatGetBlockSizes(mat,&rbs,&cbs);
951: if (rbs != 1 || cbs != 1) {
952: if (rbs != cbs) PetscViewerASCIIPrintf(viewer,"rows=%" PetscInt_FMT ", cols=%" PetscInt_FMT ", rbs=%" PetscInt_FMT ", cbs=%" PetscInt_FMT "\n",rows,cols,rbs,cbs);
953: else PetscViewerASCIIPrintf(viewer,"rows=%" PetscInt_FMT ", cols=%" PetscInt_FMT ", bs=%" PetscInt_FMT "\n",rows,cols,rbs);
954: } else PetscViewerASCIIPrintf(viewer,"rows=%" PetscInt_FMT ", cols=%" PetscInt_FMT "\n",rows,cols);
955: if (mat->factortype) {
956: MatSolverType solver;
957: MatFactorGetSolverType(mat,&solver);
958: PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
959: }
960: if (mat->ops->getinfo) {
961: MatInfo info;
962: MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
963: PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
964: if (!mat->factortype) PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls=%" PetscInt_FMT "\n",(PetscInt)info.mallocs);
965: }
966: MatGetNullSpace(mat,&nullsp);
967: MatGetTransposeNullSpace(mat,&transnullsp);
968: if (nullsp) PetscViewerASCIIPrintf(viewer," has attached null space\n");
969: if (transnullsp && transnullsp != nullsp) PetscViewerASCIIPrintf(viewer," has attached transposed null space\n");
970: MatGetNearNullSpace(mat,&nullsp);
971: if (nullsp) PetscViewerASCIIPrintf(viewer," has attached near null space\n");
972: PetscViewerASCIIPushTab(viewer);
973: MatProductView(mat,viewer);
974: PetscViewerASCIIPopTab(viewer);
975: }
976: } else if (issaws) {
977: #if defined(PETSC_HAVE_SAWS)
978: PetscMPIInt rank;
980: PetscObjectName((PetscObject)mat);
981: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
982: if (!((PetscObject)mat)->amsmem && rank == 0) {
983: PetscObjectViewSAWs((PetscObject)mat,viewer);
984: }
985: #endif
986: } else if (isstring) {
987: const char *type;
988: MatGetType(mat,&type);
989: PetscViewerStringSPrintf(viewer," MatType: %-7.7s",type);
990: if (mat->ops->view) (*mat->ops->view)(mat,viewer);
991: }
992: if ((format == PETSC_VIEWER_NATIVE || format == PETSC_VIEWER_LOAD_BALANCE) && mat->ops->viewnative) {
993: PetscViewerASCIIPushTab(viewer);
994: (*mat->ops->viewnative)(mat,viewer);
995: PetscViewerASCIIPopTab(viewer);
996: } else if (mat->ops->view) {
997: PetscViewerASCIIPushTab(viewer);
998: (*mat->ops->view)(mat,viewer);
999: PetscViewerASCIIPopTab(viewer);
1000: }
1001: if (isascii) {
1002: PetscViewerGetFormat(viewer,&format);
1003: if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1004: PetscViewerASCIIPopTab(viewer);
1005: }
1006: }
1007: PetscLogEventEnd(MAT_View,mat,viewer,0,0);
1008: return 0;
1009: }
1011: #if defined(PETSC_USE_DEBUG)
1012: #include <../src/sys/totalview/tv_data_display.h>
1013: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
1014: {
1015: TV_add_row("Local rows", "int", &mat->rmap->n);
1016: TV_add_row("Local columns", "int", &mat->cmap->n);
1017: TV_add_row("Global rows", "int", &mat->rmap->N);
1018: TV_add_row("Global columns", "int", &mat->cmap->N);
1019: TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
1020: return TV_format_OK;
1021: }
1022: #endif
1024: /*@C
1025: MatLoad - Loads a matrix that has been stored in binary/HDF5 format
1026: with MatView(). The matrix format is determined from the options database.
1027: Generates a parallel MPI matrix if the communicator has more than one
1028: processor. The default matrix type is AIJ.
1030: Collective on PetscViewer
1032: Input Parameters:
1033: + mat - the newly loaded matrix; this needs to have been created with MatCreate()
1034: or some related function before a call to MatLoad()
1035: - viewer - binary/HDF5 file viewer
1037: Options Database Keys:
1040: . -matload_block_size <bs> - set the block size; used with block matrix formats (MATSEQBAIJ, ...)
1042: Level: beginner
1044: Notes:
1045: If the Mat type has not yet been given then MATAIJ is used; call MatSetFromOptions() on the
1046: Mat before calling this routine if you wish to set it from the options database.
1048: MatLoad() automatically loads into the options database any options
1049: given in the file filename.info where filename is the name of the file
1050: that was passed to the PetscViewerBinaryOpen(). The options in the info
1051: file will be ignored if you use the -viewer_binary_skip_info option.
1053: If the type or size of mat is not set before a call to MatLoad, PETSc
1054: sets the default matrix type AIJ and sets the local and global sizes.
1055: If type and/or size is already set, then the same are used.
1057: In parallel, each processor can load a subset of rows (or the
1058: entire matrix). This routine is especially useful when a large
1059: matrix is stored on disk and only part of it is desired on each
1060: processor. For example, a parallel solver may access only some of
1061: the rows from each processor. The algorithm used here reads
1062: relatively small blocks of data rather than reading the entire
1063: matrix and then subsetting it.
1065: The viewer's PetscViewerType must be either PETSCVIEWERBINARY or PETSCVIEWERHDF5.
1066: Such a viewer can be created using PetscViewerBinaryOpen()/PetscViewerHDF5Open(),
1067: or with a sequence like
1068: $ PetscViewer v;
1069: $ PetscViewerCreate(PETSC_COMM_WORLD,&v);
1070: $ PetscViewerSetType(v,PETSCVIEWERBINARY);
1071: $ PetscViewerSetFromOptions(v);
1072: $ PetscViewerFileSetMode(v,FILE_MODE_READ);
1073: $ PetscViewerFileSetName(v,"datafile");
1074: The optional PetscViewerSetFromOptions() call allows overriding PetscViewerSetType() using the option
1075: $ -viewer_type {binary,hdf5}
1077: See the example src/ksp/ksp/tutorials/ex27.c for the first approach,
1078: and src/mat/tutorials/ex10.c for the second approach.
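Example of Usage:
A minimal sketch loading a matrix from a binary file (the file name "datafile" is illustrative; error checking omitted):
.vb
Mat         A;
PetscViewer viewer;
PetscViewerBinaryOpen(PETSC_COMM_WORLD,"datafile",FILE_MODE_READ,&viewer);
MatCreate(PETSC_COMM_WORLD,&A);
MatSetFromOptions(A);
MatLoad(A,viewer);
PetscViewerDestroy(&viewer);
.ve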
1080: Notes about the PETSc binary format:
1081: In case of PETSCVIEWERBINARY, a native PETSc binary format is used. Each of the blocks
1082: is read onto rank 0 and then shipped to its destination rank, one after another.
1083: Multiple objects, both matrices and vectors, can be stored within the same file.
1084: Their PetscObject name is ignored; they are loaded in the order of their storage.
1086: Most users should not need to know the details of the binary storage
1087: format, since MatLoad() and MatView() completely hide these details.
1088: But for anyone who's interested, the standard binary matrix storage
1089: format is
1091: $ PetscInt MAT_FILE_CLASSID
1092: $ PetscInt number of rows
1093: $ PetscInt number of columns
1094: $ PetscInt total number of nonzeros
1095: $ PetscInt *number nonzeros in each row
1096: $ PetscInt *column indices of all nonzeros (starting index is zero)
1097: $ PetscScalar *values of all nonzeros
1099: PETSc automatically does the byte swapping for
1100: machines that store the bytes reversed, e.g. DEC alpha, freebsd,
1101: Linux, Microsoft Windows and the Intel Paragon; thus if you write your own binary
1102: read/write routines you have to swap the bytes; see PetscBinaryRead()
1103: and PetscBinaryWrite() to see how this may be done.
1105: Notes about the HDF5 (MATLAB MAT-File Version 7.3) format:
1106: In case of PETSCVIEWERHDF5, a parallel HDF5 reader is used.
1107: Each processor's chunk is loaded independently by its owning rank.
1108: Multiple objects, both matrices and vectors, can be stored within the same file.
1109: They are looked up by their PetscObject name.
1111: As the MATLAB MAT-File Version 7.3 format is also an HDF5 flavor, we decided to use
1112: by default the same structure and naming of the AIJ arrays and column count
1113: within the HDF5 file. This means that a MAT file saved with -v7.3 flag, e.g.
1114: $ save example.mat A b -v7.3
1115: can be directly read by this routine (see Reference 1 for details).
1116: Note that depending on your MATLAB version, this format might be the default;
1117: otherwise you can set it as the default in Preferences.
1119: Unless the -nocompression flag is used to save the file in MATLAB,
1120: PETSc must be configured with the ZLIB package.
1122: See also examples src/mat/tutorials/ex10.c and src/ksp/ksp/tutorials/ex27.c
1124: Current HDF5 (MAT-File) limitations:
1125: This reader currently supports only real MATSEQAIJ, MATMPIAIJ, MATSEQDENSE and MATMPIDENSE matrices.
1127: Corresponding MatView() is not yet implemented.
1129: The loaded matrix is actually a transpose of the original one in MATLAB,
1130: unless you push PETSC_VIEWER_HDF5_MAT format (see examples above).
1131: With this format, the matrix is automatically transposed by PETSc,
1132: unless the matrix is marked as SPD or symmetric
1133: (see MatSetOption(), MAT_SPD, MAT_SYMMETRIC).
1135: References:
1136: . * - MATLAB(R) Documentation, manual page of save(), https://www.mathworks.com/help/matlab/ref/save.html#btox10b-1-version
1138: .seealso: PetscViewerBinaryOpen(), PetscViewerSetType(), MatView(), VecLoad()
1140: @*/
1141: PetscErrorCode MatLoad(Mat mat,PetscViewer viewer)
1142: {
1143: PetscBool flg;
1148: if (!((PetscObject)mat)->type_name) MatSetType(mat,MATAIJ);
1150: flg = PETSC_FALSE;
1151: PetscOptionsGetBool(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matload_symmetric",&flg,NULL);
1152: if (flg) {
1153: MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE);
1154: MatSetOption(mat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1155: }
1156: flg = PETSC_FALSE;
1157: PetscOptionsGetBool(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matload_spd",&flg,NULL);
1158: if (flg) MatSetOption(mat,MAT_SPD,PETSC_TRUE);
1161: PetscLogEventBegin(MAT_Load,mat,viewer,0,0);
1162: (*mat->ops->load)(mat,viewer);
1163: PetscLogEventEnd(MAT_Load,mat,viewer,0,0);
1164: return 0;
1165: }
1167: static PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1168: {
1169: Mat_Redundant *redund = *redundant;
1171: if (redund) {
1172: if (redund->matseq) { /* via MatCreateSubMatrices() */
1173: ISDestroy(&redund->isrow);
1174: ISDestroy(&redund->iscol);
1175: MatDestroySubMatrices(1,&redund->matseq);
1176: } else {
1177: PetscFree2(redund->send_rank,redund->recv_rank);
1178: PetscFree(redund->sbuf_j);
1179: PetscFree(redund->sbuf_a);
1180: for (PetscInt i=0; i<redund->nrecvs; i++) {
1181: PetscFree(redund->rbuf_j[i]);
1182: PetscFree(redund->rbuf_a[i]);
1183: }
1184: PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1185: }
1187: if (redund->subcomm) PetscCommDestroy(&redund->subcomm);
1188: PetscFree(redund);
1189: }
1190: return 0;
1191: }
1193: /*@C
1194: MatDestroy - Frees space taken by a matrix.
1196: Collective on Mat
1198: Input Parameter:
1199: . A - the matrix
1201: Level: beginner
1203: @*/
1204: PetscErrorCode MatDestroy(Mat *A)
1205: {
1206: if (!*A) return 0;
1208: if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return 0;}
1210: /* if memory was published with SAWs then destroy it */
1211: PetscObjectSAWsViewOff((PetscObject)*A);
1212: if ((*A)->ops->destroy) (*(*A)->ops->destroy)(*A);
1214: PetscFree((*A)->defaultvectype);
1215: PetscFree((*A)->bsizes);
1216: PetscFree((*A)->solvertype);
1217: for (PetscInt i=0; i<MAT_FACTOR_NUM_TYPES; i++) PetscFree((*A)->preferredordering[i]);
1218: MatDestroy_Redundant(&(*A)->redundant);
1219: MatProductClear(*A);
1220: MatNullSpaceDestroy(&(*A)->nullsp);
1221: MatNullSpaceDestroy(&(*A)->transnullsp);
1222: MatNullSpaceDestroy(&(*A)->nearnullsp);
1223: MatDestroy(&(*A)->schur);
1224: PetscLayoutDestroy(&(*A)->rmap);
1225: PetscLayoutDestroy(&(*A)->cmap);
1226: PetscHeaderDestroy(A);
1227: return 0;
1228: }
1230: /*@C
1231: MatSetValues - Inserts or adds a block of values into a matrix.
1232: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1233: MUST be called after all calls to MatSetValues() have been completed.
1235: Not Collective
1237: Input Parameters:
1238: + mat - the matrix
1239: . v - a logically two-dimensional array of values
1240: . m, idxm - the number of rows and their global indices
1241: . n, idxn - the number of columns and their global indices
1242: - addv - either ADD_VALUES or INSERT_VALUES, where
1243: ADD_VALUES adds values to any existing entries, and
1244: INSERT_VALUES replaces existing entries with new values
1246: Notes:
1247: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1248: MatSetUp() before using this routine
1250: By default the values, v, are row-oriented. See MatSetOption() for other options.
1252: Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1253: options cannot be mixed without intervening calls to the assembly
1254: routines.
1256: MatSetValues() uses 0-based row and column numbers in Fortran
1257: as well as in C.
1259: Negative indices may be passed in idxm and idxn; these rows and columns are
1260: simply ignored. This allows easily inserting element stiffness matrices
1261: with homogeneous Dirichlet boundary conditions that you don't want represented
1262: in the matrix.
1264: Efficiency Alert:
1265: The routine MatSetValuesBlocked() may offer much better efficiency
1266: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
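Example of Usage:
A minimal sketch inserting a 2x2 block of values into rows 0,1 and columns 2,3 (assumes those rows are owned by this process and the matrix has been preallocated or set up; error checking omitted):
.vb
PetscInt    idxm[2] = {0,1},idxn[2] = {2,3};
PetscScalar v[4]    = {1.0,2.0,3.0,4.0};   /* row-oriented: 1.0,2.0 go into row 0 */
MatSetValues(A,2,idxm,2,idxn,v,INSERT_VALUES);
MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve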
1268: Level: beginner
1270: Developer Notes:
1271: This is labeled with C so it does not automatically generate Fortran stubs and interfaces
1272: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
1274: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1275: InsertMode, INSERT_VALUES, ADD_VALUES
1276: @*/
1277: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1278: {
1282: if (!m || !n) return 0; /* no values to insert */
1285: MatCheckPreallocated(mat,1);
1287: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
1290: if (PetscDefined(USE_DEBUG)) {
1291: PetscInt i,j;
1296: for (i=0; i<m; i++) {
1297: for (j=0; j<n; j++) {
1298: if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1299: #if defined(PETSC_USE_COMPLEX)
1300: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+i%g at matrix entry (%" PetscInt_FMT ",%" PetscInt_FMT ")",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1301: #else
1302: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%" PetscInt_FMT ",%" PetscInt_FMT ")",(double)v[i*n+j],idxm[i],idxn[j]);
1303: #endif
1304: }
1305: }
1308: }
1310: if (mat->assembled) {
1311: mat->was_assembled = PETSC_TRUE;
1312: mat->assembled = PETSC_FALSE;
1313: }
1314: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1315: (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1316: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1317: return 0;
1318: }
1320: /*@
1321: MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1322: values into a matrix
1324: Not Collective
1326: Input Parameters:
1327: + mat - the matrix
1328: . row - the (block) row to set
1329: - v - a logically two-dimensional array of values
1331: Notes:
1332: By default the values, v, are column-oriented (for the block version) and sorted
1334: All the nonzeros in the row must be provided
1336: The matrix must have previously had its column indices set
1338: The row must belong to this process
1340: Level: intermediate
1342: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1343: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1344: @*/
1345: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1346: {
1347: PetscInt globalrow;
1352: ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1353: MatSetValuesRow(mat,globalrow,v);
1354: return 0;
1355: }
1357: /*@
1358: MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1359: values into a matrix
1361: Not Collective
1363: Input Parameters:
1364: + mat - the matrix
1365: . row - the (block) row to set
1366: - v - a logically two-dimensional (column-major) array of values for block matrices with block size larger than one, otherwise a one-dimensional array of values
1368: Notes:
1369: The values, v, are column-oriented for the block version.
1371: All the nonzeros in the row must be provided
1373: THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED; usually MatSetValues() is used.
1375: The row must belong to this process
1377: Level: advanced
1379: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1380: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1381: @*/
1382: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1383: {
1387: MatCheckPreallocated(mat,1);
1391: mat->insertmode = INSERT_VALUES;
1393: if (mat->assembled) {
1394: mat->was_assembled = PETSC_TRUE;
1395: mat->assembled = PETSC_FALSE;
1396: }
1397: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1399: (*mat->ops->setvaluesrow)(mat,row,v);
1400: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1401: return 0;
1402: }
1404: /*@
1405: MatSetValuesStencil - Inserts or adds a block of values into a matrix,
1406: using structured grid indexing
1408: Not Collective
1410: Input Parameters:
1411: + mat - the matrix
1412: . m - number of rows being entered
1413: . idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1414: . n - number of columns being entered
1415: . idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1416: . v - a logically two-dimensional array of values
1417: - addv - either ADD_VALUES or INSERT_VALUES, where
1418: ADD_VALUES adds values to any existing entries, and
1419: INSERT_VALUES replaces existing entries with new values
1421: Notes:
1422: By default the values, v, are row-oriented. See MatSetOption() for other options.
1424: Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1425: options cannot be mixed without intervening calls to the assembly
1426: routines.
1428: The grid coordinates are across the entire grid, not just the local portion
1430: MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1431: as well as in C.
1433: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1435: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1436: or call MatSetLocalToGlobalMapping() and MatSetStencil() first.
1438: The columns and rows in the stencil passed in MUST be contained within the
1439: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1440: if you create a DMDA with an overlap of one grid level and on a particular process its first
1441: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1442: first i index you can use in your column and row indices in MatSetStencil() is 5.
1444: In Fortran idxm and idxn should be declared as
1445: $ MatStencil idxm(4,m),idxn(4,n)
1446: and the values inserted using
1447: $ idxm(MatStencil_i,1) = i
1448: $ idxm(MatStencil_j,1) = j
1449: $ idxm(MatStencil_k,1) = k
1450: $ idxm(MatStencil_c,1) = c
1451: etc
1453: For periodic boundary conditions, use negative indices for values to the left (below 0); these are
1454: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1455: etc., to obtain values by wrapping from the left edge. This does not work for anything but the
1456: DM_BOUNDARY_PERIODIC boundary type.
1458: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1459: a single value per point) you can skip filling those indices.
1461: Inspired by the structured grid interface to the HYPRE package
1462: (https://computation.llnl.gov/projects/hypre-scalable-linear-solvers-multigrid-methods)
1464: Efficiency Alert:
1465: The routine MatSetValuesBlockedStencil() may offer much better efficiency
1466: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
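Example of Usage:
A minimal sketch inserting one row of a 2d 5-point stencil at a locally owned interior grid point (i,j) (assumes the matrix came from DMCreateMatrix() on a 2d DMDA with dof = 1; error checking omitted):
.vb
MatStencil  row,cols[5];
PetscScalar vals[5] = {4.0,-1.0,-1.0,-1.0,-1.0};
row.i = i;       row.j = j;
cols[0].i = i;   cols[0].j = j;
cols[1].i = i-1; cols[1].j = j;
cols[2].i = i+1; cols[2].j = j;
cols[3].i = i;   cols[3].j = j-1;
cols[4].i = i;   cols[4].j = j+1;
MatSetValuesStencil(A,1,&row,5,cols,vals,INSERT_VALUES);
.ve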
1468: Level: beginner
1470: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1471: MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1472: @*/
1473: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1474: {
1475: PetscInt buf[8192],*bufm=NULL,*bufn=NULL,*jdxm,*jdxn;
1476: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1477: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1479: if (!m || !n) return 0; /* no values to insert */
1485: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1486: jdxm = buf; jdxn = buf+m;
1487: } else {
1488: PetscMalloc2(m,&bufm,n,&bufn);
1489: jdxm = bufm; jdxn = bufn;
1490: }
1491: for (i=0; i<m; i++) {
1492: for (j=0; j<3-sdim; j++) dxm++;
1493: tmp = *dxm++ - starts[0];
1494: for (j=0; j<dim-1; j++) {
1495: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1496: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1497: }
1498: if (mat->stencil.noc) dxm++;
1499: jdxm[i] = tmp;
1500: }
1501: for (i=0; i<n; i++) {
1502: for (j=0; j<3-sdim; j++) dxn++;
1503: tmp = *dxn++ - starts[0];
1504: for (j=0; j<dim-1; j++) {
1505: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1506: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1507: }
1508: if (mat->stencil.noc) dxn++;
1509: jdxn[i] = tmp;
1510: }
1511: MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1512: PetscFree2(bufm,bufn);
1513: return 0;
1514: }
1516: /*@
1517: MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix,
1518: using structured grid indexing
1520: Not Collective
1522: Input Parameters:
1523: + mat - the matrix
1524: . m - number of rows being entered
1525: . idxm - grid coordinates for matrix rows being entered
1526: . n - number of columns being entered
1527: . idxn - grid coordinates for matrix columns being entered
1528: . v - a logically two-dimensional array of values
1529: - addv - either ADD_VALUES or INSERT_VALUES, where
1530: ADD_VALUES adds values to any existing entries, and
1531: INSERT_VALUES replaces existing entries with new values
1533: Notes:
1534: By default the values, v, are row-oriented and unsorted.
1535: See MatSetOption() for other options.
1537: Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1538: options cannot be mixed without intervening calls to the assembly
1539: routines.
1541: The grid coordinates are across the entire grid, not just the local portion
1543: MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1544: as well as in C.
1546: For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine
1548: In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1549: or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.
1551: The columns and rows in the stencil passed in MUST be contained within the
1552: ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1553: if you create a DMDA with an overlap of one grid level and on a particular process its first
1554: local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1555: first i index you can use in your column and row indices in MatSetStencil() is 5.
1557: In Fortran idxm and idxn should be declared as
1558: $ MatStencil idxm(4,m),idxn(4,n)
1559: and the values inserted using
1560: $ idxm(MatStencil_i,1) = i
1561: $ idxm(MatStencil_j,1) = j
1562: $ idxm(MatStencil_k,1) = k
1563: etc
1565: Negative indices may be passed in idxm and idxn; these rows and columns are
1566: simply ignored. This allows easily inserting element stiffness matrices
1567: with homogeneous Dirichlet boundary conditions that you don't want represented
1568: in the matrix.
1570: Inspired by the structured grid interface to the HYPRE package
1571: (https://computation.llnl.gov/projects/hypre-scalable-linear-solvers-multigrid-methods)
1573: Level: beginner
1575: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1576: MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1577: MatSetBlockSize(), MatSetLocalToGlobalMapping()
1578: @*/
1579: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1580: {
1581: PetscInt buf[8192],*bufm=NULL,*bufn=NULL,*jdxm,*jdxn;
1582: PetscInt j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1583: PetscInt *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);
1585: if (!m || !n) return 0; /* no values to insert */
1592: if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1593: jdxm = buf; jdxn = buf+m;
1594: } else {
1595: PetscMalloc2(m,&bufm,n,&bufn);
1596: jdxm = bufm; jdxn = bufn;
1597: }
1598: for (i=0; i<m; i++) {
1599: for (j=0; j<3-sdim; j++) dxm++;
1600: tmp = *dxm++ - starts[0];
1601: for (j=0; j<sdim-1; j++) {
1602: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1603: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1604: }
1605: dxm++;
1606: jdxm[i] = tmp;
1607: }
1608: for (i=0; i<n; i++) {
1609: for (j=0; j<3-sdim; j++) dxn++;
1610: tmp = *dxn++ - starts[0];
1611: for (j=0; j<sdim-1; j++) {
1612: if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1613: else tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1614: }
1615: dxn++;
1616: jdxn[i] = tmp;
1617: }
1618: MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1619: PetscFree2(bufm,bufn);
1620: return 0;
1621: }
1623: /*@
1624: MatSetStencil - Sets the grid information for setting values into a matrix via
1625: MatSetValuesStencil()
1627: Not Collective
1629: Input Parameters:
1630: + mat - the matrix
1631: . dim - dimension of the grid 1, 2, or 3
1632: . dims - number of grid points in x, y, and z direction, including ghost points on your processor
1633: . starts - starting point of ghost nodes on your processor in x, y, and z direction
1634: - dof - number of degrees of freedom per node
1636: Inspired by the structured grid interface to the HYPRE package
1637: (www.llnl.gov/CASC/hyper)
1639: For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1640: user.
1642: Level: beginner
1644: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1645: MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1646: @*/
1647: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1648: {
1653: mat->stencil.dim = dim + (dof > 1);
1654: for (PetscInt i=0; i<dim; i++) {
1655: mat->stencil.dims[i] = dims[dim-i-1]; /* copy the values in backwards */
1656: mat->stencil.starts[i] = starts[dim-i-1];
1657: }
1658: mat->stencil.dims[dim] = dof;
1659: mat->stencil.starts[dim] = 0;
1660: mat->stencil.noc = (PetscBool)(dof == 1);
1661: return 0;
1662: }
1664: /*@C
1665: MatSetValuesBlocked - Inserts or adds a block of values into a matrix.
1667: Not Collective
1669: Input Parameters:
1670: + mat - the matrix
1671: . v - a logically two-dimensional array of values
1672: . m, idxm - the number of block rows and their global block indices
1673: . n, idxn - the number of block columns and their global block indices
1674: - addv - either ADD_VALUES or INSERT_VALUES, where
1675: ADD_VALUES adds values to any existing entries, and
1676: INSERT_VALUES replaces existing entries with new values
1678: Notes:
1679: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1680: MatXXXXSetPreallocation() or MatSetUp() before using this routine.
1682: The m and n count the NUMBER of blocks in the row direction and column direction,
1683: NOT the total number of rows/columns; for example, if the block size is 2 and
1684: you are passing in values for rows 2,3,4,5 then m would be 2 (not 4).
1685: The values in idxm would be 1 and 2; that is, the first row index of each block divided by
1686: the block size.
1688: Note that you must call MatSetBlockSize() when constructing this matrix (before
1689: preallocating it).
1691: By default the values, v, are row-oriented, so the layout of
1692: v is the same as for MatSetValues(). See MatSetOption() for other options.
1694: Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1695: options cannot be mixed without intervening calls to the assembly
1696: routines.
1698: MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1699: as well as in C.
1701: Negative indices may be passed in idxm and idxn, these rows and columns are
1702: simply ignored. This allows easily inserting element stiffness matrices
1703: with homogeneous Dirichlet boundary conditions that you don't want represented
1704: in the matrix.
1706: Each time an entry is set within a sparse matrix via MatSetValues(),
1707: internal searching must be done to determine where to place the
1708: data in the matrix storage space. By instead inserting blocks of
1709: entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1710: reduced.
1712: Example:
1713: $ Suppose m=n=2 and block size (bs) = 2. The array is
1714: $
1715: $ 1 2 | 3 4
1716: $ 5 6 | 7 8
1717: $ - - - | - - -
1718: $ 9 10 | 11 12
1719: $ 13 14 | 15 16
1720: $
1721: $ v[] should be passed in like
1722: $ v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1723: $
1724: $ If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1725: $ v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
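   For illustration, a minimal sketch of the call matching the layout above (it assumes A was created with
   MatSetBlockSize(A,2) and preallocated; all indices and values are made up for the example):
.vb
   PetscInt    idxm[2] = {1,2};   /* block rows 1 and 2, i.e. global rows 2,3,4,5 */
   PetscInt    idxn[2] = {1,2};   /* block columns 1 and 2                        */
   PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};

   MatSetValuesBlocked(A,2,idxm,2,idxn,v,INSERT_VALUES);
   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve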
1727: Level: intermediate
1729: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1730: @*/
1731: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1732: {
1736: if (!m || !n) return 0; /* no values to insert */
1739: MatCheckPreallocated(mat,1);
1740: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
1742: if (PetscDefined(USE_DEBUG)) {
1745: }
1746: if (PetscDefined(USE_DEBUG)) {
1747: PetscInt rbs,cbs,M,N,i;
1748: MatGetBlockSizes(mat,&rbs,&cbs);
1749: MatGetSize(mat,&M,&N);
1752: }
1753: if (mat->assembled) {
1754: mat->was_assembled = PETSC_TRUE;
1755: mat->assembled = PETSC_FALSE;
1756: }
1757: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1758: if (mat->ops->setvaluesblocked) {
1759: (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1760: } else {
1761: PetscInt buf[8192],*bufr=NULL,*bufc=NULL,*iidxm,*iidxn;
1762: PetscInt i,j,bs,cbs;
1764: MatGetBlockSizes(mat,&bs,&cbs);
1765: if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1766: iidxm = buf;
1767: iidxn = buf + m*bs;
1768: } else {
1769: PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1770: iidxm = bufr;
1771: iidxn = bufc;
1772: }
1773: for (i=0; i<m; i++) {
1774: for (j=0; j<bs; j++) {
1775: iidxm[i*bs+j] = bs*idxm[i] + j;
1776: }
1777: }
1778: if (m != n || bs != cbs || idxm != idxn) {
1779: for (i=0; i<n; i++) {
1780: for (j=0; j<cbs; j++) {
1781: iidxn[i*cbs+j] = cbs*idxn[i] + j;
1782: }
1783: }
1784: } else iidxn = iidxm;
1785: MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1786: PetscFree2(bufr,bufc);
1787: }
1788: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1789: return 0;
1790: }
1792: /*@C
1793: MatGetValues - Gets a block of values from a matrix.
1795: Not Collective; can only return values that are owned by the given process
1797: Input Parameters:
1798: + mat - the matrix
1799: . m, idxm - the number of rows and their global indices
1800: - n, idxn - the number of columns and their global indices
1801: Output Parameter:
1802: . v - a logically two-dimensional array for storing the values
1803: Notes:
1804: The user must allocate space (m*n PetscScalars) for the values, v.
1805: The values, v, are then returned in a row-oriented format,
1806: analogous to that used by default in MatSetValues().
1808: MatGetValues() uses 0-based row and column numbers in
1809: Fortran as well as in C.
1811: MatGetValues() requires that the matrix has been assembled
1812: with MatAssemblyBegin()/MatAssemblyEnd(). Thus, calls to
1813: MatSetValues() and MatGetValues() CANNOT be made in succession
1814: without intermediate matrix assembly.
1816: Negative row or column indices will be ignored and those locations in v[] will be
1817: left unchanged.
1819: For the standard row-based matrix formats, idxm[] can only contain rows owned by the requesting MPI rank.
1820: That is, rows with global index greater than or equal to rstart and less than rend where rstart and rend are obtainable
1821: from MatGetOwnershipRange(mat,&rstart,&rend).
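   For illustration, a sketch reading a 2x2 set of entries from rows owned by this rank (it assumes the
   matrix A is assembled and that this rank owns at least two rows; the column indices are arbitrary):
.vb
   PetscInt    rstart,rend,rows[2],cols[2] = {0,1};
   PetscScalar vals[4];

   MatGetOwnershipRange(A,&rstart,&rend);
   rows[0] = rstart; rows[1] = rstart + 1;
   MatGetValues(A,2,rows,2,cols,vals);   /* vals[] is filled row by row */
.ve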
1823: Level: advanced
1825: .seealso: MatGetRow(), MatCreateSubMatrices(), MatSetValues(), MatGetOwnershipRange(), MatGetValuesLocal(), MatGetValue()
1826: @*/
1827: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1828: {
1831: if (!m || !n) return 0;
1838: MatCheckPreallocated(mat,1);
1840: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1841: (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1842: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1843: return 0;
1844: }
1846: /*@C
1847: MatGetValuesLocal - retrieves values from certain locations in a matrix using the local numbering of the indices
1848: defined previously by MatSetLocalToGlobalMapping()
1850: Not Collective
1852: Input Parameters:
1853: + mat - the matrix
1854: . nrow, irow - number of rows and their local indices
1855: - ncol, icol - number of columns and their local indices
1857: Output Parameter:
1858: . y - a logically two-dimensional array of values
1860: Notes:
1861: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine.
1863: This routine can only return values that are owned by the requesting MPI rank. That is, for standard matrix formats, rows that, in the global numbering,
1864: are greater than or equal to rstart and less than rend where rstart and rend are obtainable from MatGetOwnershipRange(mat,&rstart,&rend). One can
1865: determine if the resulting global row associated with the local row r is owned by the requesting MPI rank by applying the ISLocalToGlobalMapping set
1866: with MatSetLocalToGlobalMapping().
1868: Developer Notes:
1869: This is labelled with C so does not automatically generate Fortran stubs and interfaces
1870: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
1872: Level: advanced
1874: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
1875: MatSetValuesLocal(), MatGetValues()
1876: @*/
1877: PetscErrorCode MatGetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],PetscScalar y[])
1878: {
1882: MatCheckPreallocated(mat,1);
1883: if (!nrow || !ncol) return 0; /* no values to retrieve */
1886: if (PetscDefined(USE_DEBUG)) {
1889: }
1891: PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1892: if (mat->ops->getvalueslocal) {
1893: (*mat->ops->getvalueslocal)(mat,nrow,irow,ncol,icol,y);
1894: } else {
1895: PetscInt buf[8192],*bufr=NULL,*bufc=NULL,*irowm,*icolm;
1896: if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1897: irowm = buf; icolm = buf+nrow;
1898: } else {
1899: PetscMalloc2(nrow,&bufr,ncol,&bufc);
1900: irowm = bufr; icolm = bufc;
1901: }
1904: ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
1905: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
1906: MatGetValues(mat,nrow,irowm,ncol,icolm,y);
1907: PetscFree2(bufr,bufc);
1908: }
1909: PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1910: return 0;
1911: }
1913: /*@
1914: MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1915: the same size. Currently, this can only be called once and creates the given matrix.
1917: Not Collective
1919: Input Parameters:
1920: + mat - the matrix
1921: . nb - the number of blocks
1922: . bs - the number of rows (and columns) in each block
1923: . rows - a concatenation of the rows for each block
1924: - v - a concatenation of logically two-dimensional arrays of values
1926: Notes:
1927: In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
1929: Level: advanced
1931: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1932: InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1933: @*/
1934: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1935: {
1940: PetscAssert(!mat->factortype,PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1942: PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1943: if (mat->ops->setvaluesbatch) {
1944: (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1945: } else {
1946: for (PetscInt b = 0; b < nb; ++b) MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1947: }
1948: PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1949: return 0;
1950: }
1952: /*@
1953: MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1954: the routine MatSetValuesLocal() to allow users to insert matrix entries
1955: using a local (per-processor) numbering.
1957: Not Collective
1959: Input Parameters:
1960: + x - the matrix
1961: . rmapping - row mapping created with ISLocalToGlobalMappingCreate() or ISLocalToGlobalMappingCreateIS()
1962: - cmapping - column mapping
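   Notes:
   A sketch of creating one mapping and attaching it to both the rows and columns of x; here nlocal and
   the ghosted[] array of global indices stand in for whatever local numbering the application uses:
.vb
   ISLocalToGlobalMapping ltog;

   ISLocalToGlobalMappingCreate(PetscObjectComm((PetscObject)x),1,nlocal,ghosted,PETSC_COPY_VALUES,&ltog);
   MatSetLocalToGlobalMapping(x,ltog,ltog);
   ISLocalToGlobalMappingDestroy(&ltog);   /* the matrix keeps its own reference */
.ve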
1964: Level: intermediate
1966: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal(), MatGetValuesLocal()
1967: @*/
1968: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1969: {
1974: if (x->ops->setlocaltoglobalmapping) {
1975: (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1976: } else {
1977: PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1978: PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1979: }
1980: return 0;
1981: }
1983: /*@
1984: MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()
1986: Not Collective
1988: Input Parameter:
1989: . A - the matrix
1991: Output Parameters:
1992: + rmapping - row mapping
1993: - cmapping - column mapping
1995: Level: advanced
1997: .seealso: MatSetValuesLocal()
1998: @*/
1999: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
2000: {
2003: if (rmapping) {
2005: *rmapping = A->rmap->mapping;
2006: }
2007: if (cmapping) {
2009: *cmapping = A->cmap->mapping;
2010: }
2011: return 0;
2012: }
2014: /*@
2015: MatSetLayouts - Sets the PetscLayout objects for rows and columns of a matrix
2017: Logically Collective on A
2019: Input Parameters:
2020: + A - the matrix
2021: . rmap - row layout
2022: - cmap - column layout
2024: Level: advanced
2026: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping(), MatGetLayouts()
2027: @*/
2028: PetscErrorCode MatSetLayouts(Mat A,PetscLayout rmap,PetscLayout cmap)
2029: {
2031: PetscLayoutReference(rmap,&A->rmap);
2032: PetscLayoutReference(cmap,&A->cmap);
2033: return 0;
2034: }
2036: /*@
2037: MatGetLayouts - Gets the PetscLayout objects for rows and columns
2039: Not Collective
2041: Input Parameter:
2042: . A - the matrix
2044: Output Parameters:
2045: + rmap - row layout
2046: - cmap - column layout
2048: Level: advanced
2050: .seealso: MatCreateVecs(), MatGetLocalToGlobalMapping(), MatSetLayouts()
2051: @*/
2052: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
2053: {
2056: if (rmap) {
2058: *rmap = A->rmap;
2059: }
2060: if (cmap) {
2062: *cmap = A->cmap;
2063: }
2064: return 0;
2065: }
2067: /*@C
2068: MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
2069: using a local numbering of the nodes.
2071: Not Collective
2073: Input Parameters:
2074: + mat - the matrix
2075: . nrow, irow - number of rows and their local indices
2076: . ncol, icol - number of columns and their local indices
2077: . y - a logically two-dimensional array of values
2078: - addv - either INSERT_VALUES or ADD_VALUES, where
2079: ADD_VALUES adds values to any existing entries, and
2080: INSERT_VALUES replaces existing entries with new values
2082: Notes:
2083: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2084: MatSetUp() before using this routine
2086: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine
2088: Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2089: options cannot be mixed without intervening calls to the assembly
2090: routines.
2092: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2093: MUST be called after all calls to MatSetValuesLocal() have been completed.
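   For illustration, a sketch of one insertion with local indices (it assumes the local-to-global mapping
   was already attached with MatSetLocalToGlobalMapping(); indices and values are made up for the example):
.vb
   PetscInt    irow[1] = {0},icol[2] = {0,1};   /* local indices */
   PetscScalar y[2]    = {2.0,-1.0};

   MatSetValuesLocal(mat,1,irow,2,icol,y,ADD_VALUES);
   MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve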
2095: Level: intermediate
2097: Developer Notes:
2098: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2099: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2101: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2102: MatSetValueLocal(), MatGetValuesLocal()
2103: @*/
2104: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2105: {
2109: MatCheckPreallocated(mat,1);
2110: if (!nrow || !ncol) return 0; /* no values to insert */
2113: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
2115: if (PetscDefined(USE_DEBUG)) {
2118: }
2120: if (mat->assembled) {
2121: mat->was_assembled = PETSC_TRUE;
2122: mat->assembled = PETSC_FALSE;
2123: }
2124: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2125: if (mat->ops->setvalueslocal) {
2126: (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2127: } else {
2128: PetscInt buf[8192],*bufr=NULL,*bufc=NULL;
2129: const PetscInt *irowm,*icolm;
2131: if ((!mat->rmap->mapping && !mat->cmap->mapping) || (nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2132: bufr = buf;
2133: bufc = buf + nrow;
2134: irowm = bufr;
2135: icolm = bufc;
2136: } else {
2137: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2138: irowm = bufr;
2139: icolm = bufc;
2140: }
2141: if (mat->rmap->mapping) ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,bufr);
2142: else irowm = irow;
2143: if (mat->cmap->mapping) {
2144: if (mat->cmap->mapping != mat->rmap->mapping || ncol != nrow || icol != irow) {
2145: ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,bufc);
2146: } else icolm = irowm;
2147: } else icolm = icol;
2148: MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2149: if (bufr != buf) PetscFree2(bufr,bufc);
2150: }
2151: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2152: return 0;
2153: }
2155: /*@C
2156: MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2157: using a local ordering of the nodes a block at a time.
2159: Not Collective
2161: Input Parameters:
2162: + mat - the matrix
2163: . nrow, irow - number of rows and their local indices
2164: . ncol, icol - number of columns and their local indices
2165: . y - a logically two-dimensional array of values
2166: - addv - either INSERT_VALUES or ADD_VALUES, where
2167: ADD_VALUES adds values to any existing entries, and
2168: INSERT_VALUES replaces existing entries with new values
2170: Notes:
2171: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2172: MatSetUp() before using this routine
2174: If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2175: before using this routine.
2177: Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2178: options cannot be mixed without intervening calls to the assembly
2179: routines.
2181: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2182: MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
2184: Level: intermediate
2186: Developer Notes:
2187: This is labeled with C so does not automatically generate Fortran stubs and interfaces
2188: because it requires multiple Fortran interfaces depending on which arguments are scalar or arrays.
2190: .seealso: MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2191: MatSetValuesLocal(), MatSetValuesBlocked()
2192: @*/
2193: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2194: {
2198: MatCheckPreallocated(mat,1);
2199: if (!nrow || !ncol) return 0; /* no values to insert */
2202: if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
2204: if (PetscDefined(USE_DEBUG)) {
2207: }
2209: if (mat->assembled) {
2210: mat->was_assembled = PETSC_TRUE;
2211: mat->assembled = PETSC_FALSE;
2212: }
2213: if (PetscUnlikelyDebug(mat->rmap->mapping)) { /* Condition on the mapping existing, because MatSetValuesBlockedLocal_IS does not require it to be set. */
2214: PetscInt irbs, rbs;
2215: MatGetBlockSizes(mat, &rbs, NULL);
2216: ISLocalToGlobalMappingGetBlockSize(mat->rmap->mapping,&irbs);
2218: }
2219: if (PetscUnlikelyDebug(mat->cmap->mapping)) {
2220: PetscInt icbs, cbs;
2221: MatGetBlockSizes(mat,NULL,&cbs);
2222: ISLocalToGlobalMappingGetBlockSize(mat->cmap->mapping,&icbs);
2224: }
2225: PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2226: if (mat->ops->setvaluesblockedlocal) {
2227: (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2228: } else {
2229: PetscInt buf[8192],*bufr=NULL,*bufc=NULL;
2230: const PetscInt *irowm,*icolm;
2232: if ((!mat->rmap->mapping && !mat->cmap->mapping) || (nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2233: bufr = buf;
2234: bufc = buf + nrow;
2235: irowm = bufr;
2236: icolm = bufc;
2237: } else {
2238: PetscMalloc2(nrow,&bufr,ncol,&bufc);
2239: irowm = bufr;
2240: icolm = bufc;
2241: }
2242: if (mat->rmap->mapping) ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,bufr);
2243: else irowm = irow;
2244: if (mat->cmap->mapping) {
2245: if (mat->cmap->mapping != mat->rmap->mapping || ncol != nrow || icol != irow) {
2246: ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,bufc);
2247: } else icolm = irowm;
2248: } else icolm = icol;
2249: MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2250: if (bufr != buf) PetscFree2(bufr,bufc);
2251: }
2252: PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2253: return 0;
2254: }
2256: /*@
2257: MatMultDiagonalBlock - Computes the matrix-vector product y = Dx, where D is defined by the inode or block structure of the diagonal
2259: Collective on Mat
2261: Input Parameters:
2262: + mat - the matrix
2263: - x - the vector to be multiplied
2265: Output Parameters:
2266: . y - the result
2268: Notes:
2269: The vectors x and y cannot be the same. I.e., one cannot
2270: call MatMultDiagonalBlock(A,y,y).
2272: Level: developer
2274: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2275: @*/
2276: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2277: {
2286: MatCheckPreallocated(mat,1);
2289: (*mat->ops->multdiagonalblock)(mat,x,y);
2290: PetscObjectStateIncrease((PetscObject)y);
2291: return 0;
2292: }
2294: /* --------------------------------------------------------*/
2295: /*@
2296: MatMult - Computes the matrix-vector product, y = Ax.
2298: Neighbor-wise Collective on Mat
2300: Input Parameters:
2301: + mat - the matrix
2302: - x - the vector to be multiplied
2304: Output Parameters:
2305: . y - the result
2307: Notes:
2308: The vectors x and y cannot be the same. I.e., one cannot
2309: call MatMult(A,y,y).
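   For illustration, a sketch of a typical call, obtaining the vectors from the matrix so their layouts
   are compatible with mat:
.vb
   Vec x,y;

   MatCreateVecs(mat,&x,&y);   /* x is compatible with the columns, y with the rows */
   VecSet(x,1.0);
   MatMult(mat,x,y);           /* y = A x */
   VecDestroy(&x);
   VecDestroy(&y);
.ve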
2311: Level: beginner
2313: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2314: @*/
2315: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2316: {
2328: VecSetErrorIfLocked(y,3);
2329: if (mat->erroriffailure) VecValidValues(x,2,PETSC_TRUE);
2330: MatCheckPreallocated(mat,1);
2332: VecLockReadPush(x);
2334: PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2335: (*mat->ops->mult)(mat,x,y);
2336: PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2337: if (mat->erroriffailure) VecValidValues(y,3,PETSC_FALSE);
2338: VecLockReadPop(x);
2339: return 0;
2340: }
2342: /*@
2343: MatMultTranspose - Computes matrix transpose times a vector y = A^T * x.
2345: Neighbor-wise Collective on Mat
2347: Input Parameters:
2348: + mat - the matrix
2349: - x - the vector to be multiplied
2351: Output Parameters:
2352: . y - the result
2354: Notes:
2355: The vectors x and y cannot be the same. I.e., one cannot
2356: call MatMultTranspose(A,y,y).
2358: For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose product;
2359: use MatMultHermitianTranspose() for that.
2361: Level: beginner
2363: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2364: @*/
2365: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2366: {
2367: PetscErrorCode (*op)(Mat,Vec,Vec) = NULL;
2381: if (mat->erroriffailure) VecValidValues(x,2,PETSC_TRUE);
2382: MatCheckPreallocated(mat,1);
2384: if (!mat->ops->multtranspose) {
2385: if (mat->symmetric && mat->ops->mult) op = mat->ops->mult;
2387: } else op = mat->ops->multtranspose;
2388: PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2389: VecLockReadPush(x);
2390: (*op)(mat,x,y);
2391: VecLockReadPop(x);
2392: PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2393: PetscObjectStateIncrease((PetscObject)y);
2394: if (mat->erroriffailure) VecValidValues(y,3,PETSC_FALSE);
2395: return 0;
2396: }
2398: /*@
2399: MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.
2401: Neighbor-wise Collective on Mat
2403: Input Parameters:
2404: + mat - the matrix
2405: - x - the vector to be multiplied
2407: Output Parameters:
2408: . y - the result
2410: Notes:
2411: The vectors x and y cannot be the same. I.e., one cannot
2412: call MatMultHermitianTranspose(A,y,y).
2414: Also called the conjugate transpose, complex conjugate transpose, or adjoint.
2416: For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.
2418: Level: beginner
2420: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2421: @*/
2422: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2423: {
2436: MatCheckPreallocated(mat,1);
2438: PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2439: #if defined(PETSC_USE_COMPLEX)
2440: if (mat->ops->multhermitiantranspose || (mat->hermitian && mat->ops->mult)) {
2441: VecLockReadPush(x);
2442: if (mat->ops->multhermitiantranspose) {
2443: (*mat->ops->multhermitiantranspose)(mat,x,y);
2444: } else {
2445: (*mat->ops->mult)(mat,x,y);
2446: }
2447: VecLockReadPop(x);
2448: } else {
2449: Vec w;
2450: VecDuplicate(x,&w);
2451: VecCopy(x,w);
2452: VecConjugate(w);
2453: MatMultTranspose(mat,w,y);
2454: VecDestroy(&w);
2455: VecConjugate(y);
2456: }
2457: PetscObjectStateIncrease((PetscObject)y);
2458: #else
2459: MatMultTranspose(mat,x,y);
2460: #endif
2461: PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2462: return 0;
2463: }
2465: /*@
2466: MatMultAdd - Computes v3 = v2 + A * v1.
2468: Neighbor-wise Collective on Mat
2470: Input Parameters:
2471: + mat - the matrix
2472: - v1, v2 - the vectors
2474: Output Parameters:
2475: . v3 - the result
2477: Notes:
2478: The vectors v1 and v3 cannot be the same. I.e., one cannot
2479: call MatMultAdd(A,v1,v2,v1).
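   For illustration, a sketch assuming v1, v2, and v3 have already been created with layouts compatible
   with mat (for example via MatCreateVecs() and VecDuplicate()):
.vb
   MatMultAdd(mat,v1,v2,v3);   /* v3 = v2 + A v1, in one call instead of MatMult() plus VecAXPY() */
.ve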
2481: Level: beginner
2483: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2484: @*/
2485: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2486: {
2501: MatCheckPreallocated(mat,1);
2504: PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2505: VecLockReadPush(v1);
2506: (*mat->ops->multadd)(mat,v1,v2,v3);
2507: VecLockReadPop(v1);
2508: PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2509: PetscObjectStateIncrease((PetscObject)v3);
2510: return 0;
2511: }
2513: /*@
2514: MatMultTransposeAdd - Computes v3 = v2 + A' * v1.
2516: Neighbor-wise Collective on Mat
2518: Input Parameters:
2519: + mat - the matrix
2520: - v1, v2 - the vectors
2522: Output Parameters:
2523: . v3 - the result
2525: Notes:
2526: The vectors v1 and v3 cannot be the same. I.e., one cannot
2527: call MatMultTransposeAdd(A,v1,v2,v1).
2529: Level: beginner
2531: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2532: @*/
2533: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2534: {
2535: PetscErrorCode (*op)(Mat,Vec,Vec,Vec) = (!mat->ops->multtransposeadd && mat->symmetric) ? mat->ops->multadd : mat->ops->multtransposeadd;
2550: MatCheckPreallocated(mat,1);
2552: PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2553: VecLockReadPush(v1);
2554: (*op)(mat,v1,v2,v3);
2555: VecLockReadPop(v1);
2556: PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2557: PetscObjectStateIncrease((PetscObject)v3);
2558: return 0;
2559: }
2561: /*@
2562: MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.
2564: Neighbor-wise Collective on Mat
2566: Input Parameters:
2567: + mat - the matrix
2568: - v1, v2 - the vectors
2570: Output Parameters:
2571: . v3 - the result
2573: Notes:
2574: The vectors v1 and v3 cannot be the same. I.e., one cannot
2575: call MatMultHermitianTransposeAdd(A,v1,v2,v1).
2577: Level: beginner
2579: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2580: @*/
2581: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2582: {
2595: MatCheckPreallocated(mat,1);
2597: PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2598: VecLockReadPush(v1);
2599: if (mat->ops->multhermitiantransposeadd) {
2600: (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2601: } else {
2602: Vec w,z;
2603: VecDuplicate(v1,&w);
2604: VecCopy(v1,w);
2605: VecConjugate(w);
2606: VecDuplicate(v3,&z);
2607: MatMultTranspose(mat,w,z);
2608: VecDestroy(&w);
2609: VecConjugate(z);
2610: if (v2 != v3) {
2611: VecWAXPY(v3,1.0,v2,z);
2612: } else {
2613: VecAXPY(v3,1.0,z);
2614: }
2615: VecDestroy(&z);
2616: }
2617: VecLockReadPop(v1);
2618: PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2619: PetscObjectStateIncrease((PetscObject)v3);
2620: return 0;
2621: }
2623: /*@C
2624: MatGetFactorType - gets the type of factorization a matrix represents
2626: Not Collective
2628: Input Parameter:
2629: . mat - the matrix
2631: Output Parameter:
2632: . t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC, MAT_FACTOR_ILUDT
2634: Level: intermediate
2636: .seealso: MatFactorType, MatGetFactor(), MatSetFactorType()
2637: @*/
2638: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2639: {
2643: *t = mat->factortype;
2644: return 0;
2645: }
2647: /*@C
2648: MatSetFactorType - sets the type of factorization a matrix represents
2650: Logically Collective on Mat
2652: Input Parameters:
2653: + mat - the matrix
2654: - t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC, MAT_FACTOR_ILUDT
2656: Level: intermediate
2658: .seealso: MatFactorType, MatGetFactor(), MatGetFactorType()
2659: @*/
2660: PetscErrorCode MatSetFactorType(Mat mat, MatFactorType t)
2661: {
2664: mat->factortype = t;
2665: return 0;
2666: }
2668: /* ------------------------------------------------------------*/
2669: /*@C
2670: MatGetInfo - Returns information about matrix storage (number of
2671: nonzeros, memory, etc.).
2673: Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag
2675: Input Parameters:
2676: + mat - the matrix
2677: - flag - flag indicating the type of parameters to be returned
2678: (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2679: MAT_GLOBAL_SUM - sum over all processors)
2681: Output Parameter:
2682: . info - matrix information context
2684: Notes:
2685: The MatInfo context contains a variety of matrix data, including
2686: number of nonzeros allocated and used, number of mallocs during
2687: matrix assembly, etc. Additional information for factored matrices
2688: is provided (such as the fill ratio, number of mallocs during
2689: factorization, etc.). Much of this info is printed to PETSC_STDOUT
2690: when using the runtime options
2691: $ -info -mat_view ::ascii_info
2693: Example for C/C++ Users:
2694: See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2695: data within the MatInfo context. For example,
2696: .vb
2697: MatInfo info;
2698: Mat A;
2699: double mal, nz_a, nz_u;
2701: MatGetInfo(A,MAT_LOCAL,&info);
2702: mal = info.mallocs;
2703: nz_a = info.nz_allocated;
2704: .ve
2706: Example for Fortran Users:
2707: Fortran users should declare info as a double precision
2708: array of dimension MAT_INFO_SIZE, and then extract the parameters
2709: of interest. See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2710: for a complete list of parameter names.
2711: .vb
2712: double precision info(MAT_INFO_SIZE)
2713: double precision mal, nz_a
2714: Mat A
2715: integer ierr
2717: call MatGetInfo(A,MAT_LOCAL,info,ierr)
2718: mal = info(MAT_INFO_MALLOCS)
2719: nz_a = info(MAT_INFO_NZ_ALLOCATED)
2720: .ve
2722: Level: intermediate
2724: Developer Note: fortran interface is not autogenerated as the f90
2725: interface definition cannot be generated correctly [due to MatInfo]
2727: .seealso: MatStashGetInfo()
2729: @*/
2730: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2731: {
2736: MatCheckPreallocated(mat,1);
2737: (*mat->ops->getinfo)(mat,flag,info);
2738: return 0;
2739: }
2741: /*
2742: This is used by external packages where it is not easy to get the info from the actual
2743: matrix factorization.
2744: */
2745: PetscErrorCode MatGetInfo_External(Mat A,MatInfoType flag,MatInfo *info)
2746: {
2747: PetscMemzero(info,sizeof(MatInfo));
2748: return 0;
2749: }
2751: /* ----------------------------------------------------------*/
2753: /*@C
2754: MatLUFactor - Performs in-place LU factorization of matrix.
2756: Collective on Mat
2758: Input Parameters:
2759: + mat - the matrix
2760: . row - row permutation
2761: . col - column permutation
2762: - info - options for factorization, includes
2763: $ fill - expected fill as ratio of original fill.
2764: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2765: $ Run with the option -info to determine an optimal value to use
2767: Notes:
2768: Most users should employ the simplified KSP interface for linear solvers
2769: instead of working directly with matrix algebra routines such as this.
2770: See, e.g., KSPCreate().
2772: This changes the state of the matrix to a factored matrix; it cannot be used
2773: for example with MatSetValues() unless one first calls MatSetUnfactored().
2775: Level: developer
2777: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2778: MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()
2780: Developer Note: fortran interface is not autogenerated as the f90
2781: interface definition cannot be generated correctly [due to MatFactorInfo]
2783: @*/
2784: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2785: {
2786: MatFactorInfo tinfo;
2796: MatCheckPreallocated(mat,1);
2797: if (!info) {
2798: MatFactorInfoInitialize(&tinfo);
2799: info = &tinfo;
2800: }
2802: PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2803: (*mat->ops->lufactor)(mat,row,col,info);
2804: PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2805: PetscObjectStateIncrease((PetscObject)mat);
2806: return 0;
2807: }
2809: /*@C
2810: MatILUFactor - Performs in-place ILU factorization of matrix.
2812: Collective on Mat
2814: Input Parameters:
2815: + mat - the matrix
2816: . row - row permutation
2817: . col - column permutation
2818: - info - structure containing
2819: $ levels - number of levels of fill.
2820: $ expected fill - as ratio of original fill.
2821: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2822: missing diagonal entries)
2824: Notes:
2825: Probably really in-place only when level of fill is zero, otherwise allocates
2826: new space to store factored matrix and deletes previous memory.
2828: Most users should employ the simplified KSP interface for linear solvers
2829: instead of working directly with matrix algebra routines such as this.
2830: See, e.g., KSPCreate().
2832: Level: developer
2834: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
2836: Developer Note: fortran interface is not autogenerated as the f90
2837: interface definition cannot be generated correctly [due to MatFactorInfo]
2839: @*/
2840: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2841: {
2851: MatCheckPreallocated(mat,1);
2853: PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2854: (*mat->ops->ilufactor)(mat,row,col,info);
2855: PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2856: PetscObjectStateIncrease((PetscObject)mat);
2857: return 0;
2858: }
2860: /*@C
2861: MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2862: Call this routine before calling MatLUFactorNumeric().
2864: Collective on Mat
2866: Input Parameters:
2867: + fact - the factor matrix obtained with MatGetFactor()
2868: . mat - the matrix
2869: . row, col - row and column permutations
2870: - info - options for factorization, includes
2871: $ fill - expected fill as ratio of original fill.
2872: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2873: $ Run with the option -info to determine an optimal value to use
2875: Notes:
2876: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
2878: Most users should employ the simplified KSP interface for linear solvers
2879: instead of working directly with matrix algebra routines such as this.
2880: See, e.g., KSPCreate().
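   For illustration, a sketch of the usual sequence with PETSc's native solver; the solver package and
   ordering type are just example choices:
.vb
   Mat           F;
   IS            row,col;
   MatFactorInfo info;

   MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
   MatGetOrdering(mat,MATORDERINGND,&row,&col);
   MatFactorInfoInitialize(&info);
   MatLUFactorSymbolic(F,mat,row,col,&info);
   MatLUFactorNumeric(F,mat,&info);      /* F can now be used with MatSolve(F,b,x) */
   ISDestroy(&row);
   ISDestroy(&col);
.ve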
2882: Level: developer
2884: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()
2886: Developer Note: fortran interface is not autogenerated as the f90
2887: interface definition cannot be generated correctly [due to MatFactorInfo]
2889: @*/
2890: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2891: {
2892: MatFactorInfo tinfo;
2902: if (!(fact)->ops->lufactorsymbolic) {
2903: MatSolverType stype;
2904: MatFactorGetSolverType(fact,&stype);
2905: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,stype);
2906: }
2907: MatCheckPreallocated(mat,2);
2908: if (!info) {
2909: MatFactorInfoInitialize(&tinfo);
2910: info = &tinfo;
2911: }
2913: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2914: (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2915: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2916: PetscObjectStateIncrease((PetscObject)fact);
2917: return 0;
2918: }
2920: /*@C
2921: MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2922: Call this routine after first calling MatLUFactorSymbolic().
2924: Collective on Mat
2926: Input Parameters:
2927: + fact - the factor matrix obtained with MatGetFactor()
2928: . mat - the matrix
2929: - info - options for factorization
2931: Notes:
2932: See MatLUFactor() for in-place factorization. See
2933: MatCholeskyFactorNumeric() for the symmetric, positive definite case.
2935: Most users should employ the simplified KSP interface for linear solvers
2936: instead of working directly with matrix algebra routines such as this.
2937: See, e.g., KSPCreate().
2939: Level: developer
2941: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()
2943: Developer Note: fortran interface is not autogenerated as the f90
2944: interface definition cannot be generated correctly [due to MatFactorInfo]
2946: @*/
2947: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
2948: {
2949: MatFactorInfo tinfo;
2959: MatCheckPreallocated(mat,2);
2960: if (!info) {
2961: MatFactorInfoInitialize(&tinfo);
2962: info = &tinfo;
2963: }
2965: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
2966: else PetscLogEventBegin(MAT_LUFactor,mat,fact,0,0);
2967: (fact->ops->lufactornumeric)(fact,mat,info);
2968: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
2969: else PetscLogEventEnd(MAT_LUFactor,mat,fact,0,0);
2970: MatViewFromOptions(fact,NULL,"-mat_factor_view");
2971: PetscObjectStateIncrease((PetscObject)fact);
2972: return 0;
2973: }
2975: /*@C
2976: MatCholeskyFactor - Performs in-place Cholesky factorization of a
2977: symmetric matrix.
2979: Collective on Mat
2981: Input Parameters:
2982: + mat - the matrix
2983: . perm - row and column permutations
2984: - info - options for factorization, including the expected fill as a ratio of the original fill
2986: Notes:
2987: See MatLUFactor() for the nonsymmetric case. See also
2988: MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().
2990: Most users should employ the simplified KSP interface for linear solvers
2991: instead of working directly with matrix algebra routines such as this.
2992: See, e.g., KSPCreate().
2994: Level: developer
2996: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
2997: MatGetOrdering()
2999: Developer Note: fortran interface is not autogenerated as the f90
3000: interface definition cannot be generated correctly [due to MatFactorInfo]
3002: @*/
3003: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3004: {
3005: MatFactorInfo tinfo;
3015: MatCheckPreallocated(mat,1);
3016: if (!info) {
3017: MatFactorInfoInitialize(&tinfo);
3018: info = &tinfo;
3019: }
3021: PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3022: (*mat->ops->choleskyfactor)(mat,perm,info);
3023: PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3024: PetscObjectStateIncrease((PetscObject)mat);
3025: return 0;
3026: }
3028: /*@C
3029: MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3030: of a symmetric matrix.
3032: Collective on Mat
3034: Input Parameters:
3035: + fact - the factor matrix obtained with MatGetFactor()
3036: . mat - the matrix
3037: . perm - row and column permutations
3038: - info - options for factorization, includes
3039: $ fill - expected fill as ratio of original fill.
3040: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3041: $ Run with the option -info to determine an optimal value to use
3043: Notes:
3044: See MatLUFactorSymbolic() for the nonsymmetric case. See also
3045: MatCholeskyFactor() and MatCholeskyFactorNumeric().
3047: Most users should employ the simplified KSP interface for linear solvers
3048: instead of working directly with matrix algebra routines such as this.
3049: See, e.g., KSPCreate().
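   For illustration, the sequence mirrors the LU case; a sketch for a symmetric matrix (solver package and
   ordering type are example choices):
.vb
   Mat           F;
   IS            rperm,cperm;
   MatFactorInfo info;

   MatGetFactor(mat,MATSOLVERPETSC,MAT_FACTOR_CHOLESKY,&F);
   MatGetOrdering(mat,MATORDERINGNATURAL,&rperm,&cperm);
   MatFactorInfoInitialize(&info);
   MatCholeskyFactorSymbolic(F,mat,rperm,&info);
   MatCholeskyFactorNumeric(F,mat,&info);
   ISDestroy(&rperm);
   ISDestroy(&cperm);
.ve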
3051: Level: developer
3053: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3054: MatGetOrdering()
3056: Developer Note: fortran interface is not autogenerated as the f90
3057: interface definition cannot be generated correctly [due to MatFactorInfo]
3059: @*/
3060: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3061: {
3062: MatFactorInfo tinfo;
3072: if (!(fact)->ops->choleskyfactorsymbolic) {
3073: MatSolverType stype;
3074: MatFactorGetSolverType(fact,&stype);
3075: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,stype);
3076: }
3077: MatCheckPreallocated(mat,2);
3078: if (!info) {
3079: MatFactorInfoInitialize(&tinfo);
3080: info = &tinfo;
3081: }
3083: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3084: (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3085: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3086: PetscObjectStateIncrease((PetscObject)fact);
3087: return 0;
3088: }
3090: /*@C
3091: MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3092: of a symmetric matrix. Call this routine after first calling
3093: MatCholeskyFactorSymbolic().
3095: Collective on Mat
3097: Input Parameters:
3098: + fact - the factor matrix obtained with MatGetFactor(), containing the symbolic factor of mat
3099: . mat - the initial matrix
3100: - info - options for factorization
3103: Notes:
3104: Most users should employ the simplified KSP interface for linear solvers
3105: instead of working directly with matrix algebra routines such as this.
3106: See, e.g., KSPCreate().
3108: Level: developer
3110: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()
3112: Developer Note: fortran interface is not autogenerated as the f90
3113: interface definition cannot be generated correctly [due to MatFactorInfo]
3115: @*/
3116: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3117: {
3118: MatFactorInfo tinfo;
3127: MatCheckPreallocated(mat,2);
3128: if (!info) {
3129: MatFactorInfoInitialize(&tinfo);
3130: info = &tinfo;
3131: }
3133: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3134: else PetscLogEventBegin(MAT_CholeskyFactor,mat,fact,0,0);
3135: (fact->ops->choleskyfactornumeric)(fact,mat,info);
3136: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3137: else PetscLogEventEnd(MAT_CholeskyFactor,mat,fact,0,0);
3138: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3139: PetscObjectStateIncrease((PetscObject)fact);
3140: return 0;
3141: }
3143: /*@
3144: MatQRFactor - Performs in-place QR factorization of matrix.
3146: Collective on Mat
3148: Input Parameters:
3149: + mat - the matrix
3150: . col - column permutation
3151: - info - options for factorization, includes
3152: $ fill - expected fill as ratio of original fill.
3153: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3154: $ Run with the option -info to determine an optimal value to use
3156: Notes:
3157: Most users should employ the simplified KSP interface for linear solvers
3158: instead of working directly with matrix algebra routines such as this.
3159: See, e.g., KSPCreate().
3161: This changes the state of the matrix to a factored matrix; it cannot be used
3162: for example with MatSetValues() unless one first calls MatSetUnfactored().
3164: Level: developer
3166: .seealso: MatQRFactorSymbolic(), MatQRFactorNumeric(), MatLUFactor(),
3167: MatSetUnfactored(), MatFactorInfo, MatGetFactor()
3169: Developer Note: fortran interface is not autogenerated as the f90
3170: interface definition cannot be generated correctly [due to MatFactorInfo]
3172: @*/
3173: PetscErrorCode MatQRFactor(Mat mat, IS col, const MatFactorInfo *info)
3174: {
3181: MatCheckPreallocated(mat,1);
3182: PetscLogEventBegin(MAT_QRFactor,mat,col,0,0);
3183: PetscUseMethod(mat,"MatQRFactor_C", (Mat,IS,const MatFactorInfo*), (mat, col, info));
3184: PetscLogEventEnd(MAT_QRFactor,mat,col,0,0);
3185: PetscObjectStateIncrease((PetscObject)mat);
3186: return 0;
3187: }
3189: /*@
3190: MatQRFactorSymbolic - Performs symbolic QR factorization of matrix.
3191: Call this routine before calling MatQRFactorNumeric().
3193: Collective on Mat
3195: Input Parameters:
3196: + fact - the factor matrix obtained with MatGetFactor()
3197: . mat - the matrix
3198: . col - column permutation
3199: - info - options for factorization, includes
3200: $ fill - expected fill as ratio of original fill.
3201: $ dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3202: $ Run with the option -info to determine an optimal value to use
3204: Most users should employ the simplified KSP interface for linear solvers
3205: instead of working directly with matrix algebra routines such as this.
3206: See, e.g., KSPCreate().
3208: Level: developer
3210: .seealso: MatQRFactor(), MatQRFactorNumeric(), MatLUFactor(), MatFactorInfo, MatFactorInfoInitialize()
3212: Developer Note: fortran interface is not autogenerated as the f90
3213: interface definition cannot be generated correctly [due to MatFactorInfo]
3215: @*/
3216: PetscErrorCode MatQRFactorSymbolic(Mat fact,Mat mat,IS col,const MatFactorInfo *info)
3217: {
3218: MatFactorInfo tinfo;
3227: MatCheckPreallocated(mat,2);
3228: if (!info) {
3229: MatFactorInfoInitialize(&tinfo);
3230: info = &tinfo;
3231: }
3233: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_QRFactorSymbolic,fact,mat,col,0);
3234: PetscUseMethod(fact,"MatQRFactorSymbolic_C", (Mat,Mat,IS,const MatFactorInfo*), (fact, mat, col, info));
3235: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_QRFactorSymbolic,fact,mat,col,0);
3236: PetscObjectStateIncrease((PetscObject)fact);
3237: return 0;
3238: }
3240: /*@
3241: MatQRFactorNumeric - Performs numeric QR factorization of a matrix.
3242: Call this routine after first calling MatQRFactorSymbolic().
3244: Collective on Mat
3246: Input Parameters:
3247: + fact - the factor matrix obtained with MatGetFactor()
3248: . mat - the matrix
3249: - info - options for factorization
3251: Notes:
3252: See MatQRFactor() for in-place factorization.
3254: Most users should employ the simplified KSP interface for linear solvers
3255: instead of working directly with matrix algebra routines such as this.
3256: See, e.g., KSPCreate().
3258: Level: developer
3260: .seealso: MatQRFactorSymbolic(), MatLUFactor()
3262: Developer Note: fortran interface is not autogenerated as the f90
3263: interface definition cannot be generated correctly [due to MatFactorInfo]
3265: @*/
3266: PetscErrorCode MatQRFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3267: {
3268: MatFactorInfo tinfo;
3277: MatCheckPreallocated(mat,2);
3278: if (!info) {
3279: MatFactorInfoInitialize(&tinfo);
3280: info = &tinfo;
3281: }
3283: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_QRFactorNumeric,mat,fact,0,0);
3284: else PetscLogEventBegin(MAT_QRFactor,mat,fact,0,0);
3285: PetscUseMethod(fact,"MatQRFactorNumeric_C", (Mat,Mat,const MatFactorInfo*), (fact, mat, info));
3286: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_QRFactorNumeric,mat,fact,0,0);
3287: else PetscLogEventEnd(MAT_QRFactor,mat,fact,0,0);
3288: MatViewFromOptions(fact,NULL,"-mat_factor_view");
3289: PetscObjectStateIncrease((PetscObject)fact);
3290: return 0;
3291: }
3293: /* ----------------------------------------------------------------*/
3294: /*@
3295: MatSolve - Solves A x = b, given a factored matrix.
3297: Neighbor-wise Collective on Mat
3299: Input Parameters:
3300: + mat - the factored matrix
3301: - b - the right-hand-side vector
3303: Output Parameter:
3304: . x - the result vector
3306: Notes:
3307: The vectors b and x cannot be the same. I.e., one cannot
3308: call MatSolve(A,x,x).
3311: Most users should employ the simplified KSP interface for linear solvers
3312: instead of working directly with matrix algebra routines such as this.
3313: See, e.g., KSPCreate().
3315: Level: developer
3317: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3318: @*/
3319: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3320: {
3331: if (!mat->rmap->N && !mat->cmap->N) return 0;
3332: MatCheckPreallocated(mat,1);
3334: PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3335: if (mat->factorerrortype) {
3336: PetscInfo(mat,"MatFactorError %d\n",mat->factorerrortype);
3337: VecSetInf(x);
3338: } else {
3340: (*mat->ops->solve)(mat,b,x);
3341: }
3342: PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3343: PetscObjectStateIncrease((PetscObject)x);
3344: return 0;
3345: }
3347: static PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X,PetscBool trans)
3348: {
3349: Vec b,x;
3350: PetscInt N,i;
3351: PetscErrorCode (*f)(Mat,Vec,Vec);
3352: PetscBool Abound,Bneedconv = PETSC_FALSE,Xneedconv = PETSC_FALSE;
3354: if (A->factorerrortype) {
3355: PetscInfo(A,"MatFactorError %d\n",A->factorerrortype);
3356: MatSetInf(X);
3357: return 0;
3358: }
3359: f = (!trans || (!A->ops->solvetranspose && A->symmetric)) ? A->ops->solve : A->ops->solvetranspose;
3361: MatBoundToCPU(A,&Abound);
3362: if (!Abound) {
3363: PetscObjectTypeCompareAny((PetscObject)B,&Bneedconv,MATSEQDENSE,MATMPIDENSE,"");
3364: PetscObjectTypeCompareAny((PetscObject)X,&Xneedconv,MATSEQDENSE,MATMPIDENSE,"");
3365: }
3366: if (Bneedconv) {
3367: MatConvert(B,MATDENSECUDA,MAT_INPLACE_MATRIX,&B);
3368: }
3369: if (Xneedconv) {
3370: MatConvert(X,MATDENSECUDA,MAT_INPLACE_MATRIX,&X);
3371: }
3372: MatGetSize(B,NULL,&N);
3373: for (i=0; i<N; i++) {
3374: MatDenseGetColumnVecRead(B,i,&b);
3375: MatDenseGetColumnVecWrite(X,i,&x);
3376: (*f)(A,b,x);
3377: MatDenseRestoreColumnVecWrite(X,i,&x);
3378: MatDenseRestoreColumnVecRead(B,i,&b);
3379: }
3380: if (Bneedconv) {
3381: MatConvert(B,MATDENSE,MAT_INPLACE_MATRIX,&B);
3382: }
3383: if (Xneedconv) {
3384: MatConvert(X,MATDENSE,MAT_INPLACE_MATRIX,&X);
3385: }
3386: return 0;
3387: }
3389: /*@
3390: MatMatSolve - Solves A X = B, given a factored matrix.
3392: Neighbor-wise Collective on Mat
3394: Input Parameters:
3395: + A - the factored matrix
3396: - B - the right-hand-side matrix MATDENSE (or sparse -- when using MUMPS)
3398: Output Parameter:
3399: . X - the result matrix (dense matrix)
3401: Notes:
3402: If B is a MATDENSE matrix then one can call MatMatSolve(A,B,B) except with MKL_CPARDISO;
3403: otherwise, B and X cannot be the same.
3406: Most users should usually employ the simplified KSP interface for linear solvers
3407: instead of working directly with matrix algebra routines such as this.
3408: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3409: at a time.
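   For illustration, a sketch solving for nrhs right-hand sides at once; A is the factored matrix and
   nrhs is a placeholder for the number of columns of B:
.vb
   Mat      B,X;
   PetscInt m,M;

   MatGetLocalSize(A,&m,NULL);
   MatGetSize(A,&M,NULL);
   MatCreateDense(PetscObjectComm((PetscObject)A),m,PETSC_DECIDE,M,nrhs,NULL,&B);
   MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);
   /* ... fill B with MatSetValues() and assemble it ... */
   MatMatSolve(A,B,X);
.ve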
3411: Level: developer
3413: .seealso: MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3414: @*/
3415: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3416: {
3426: if (!A->rmap->N && !A->cmap->N) return 0;
3428: MatCheckPreallocated(A,1);
3430: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3431: if (!A->ops->matsolve) {
3432: PetscInfo(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3433: MatMatSolve_Basic(A,B,X,PETSC_FALSE);
3434: } else {
3435: (*A->ops->matsolve)(A,B,X);
3436: }
3437: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3438: PetscObjectStateIncrease((PetscObject)X);
3439: return 0;
3440: }
3442: /*@
3443: MatMatSolveTranspose - Solves A^T X = B, given a factored matrix.
3445: Neighbor-wise Collective on Mat
3447: Input Parameters:
3448: + A - the factored matrix
3449: - B - the right-hand-side matrix (dense matrix)
3451: Output Parameter:
3452: . X - the result matrix (dense matrix)
3454: Notes:
3455: The matrices B and X cannot be the same. I.e., one cannot
3456: call MatMatSolveTranspose(A,X,X).
3459: Most users should usually employ the simplified KSP interface for linear solvers
3460: instead of working directly with matrix algebra routines such as this.
3461: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3462: at a time.
3464: When using SuperLU_Dist or MUMPS as a parallel solver, PETSc will use their functionality to solve multiple right hand sides simultaneously.
3466: Level: developer
3468: .seealso: MatMatSolve(), MatLUFactor(), MatCholeskyFactor()
3469: @*/
3470: PetscErrorCode MatMatSolveTranspose(Mat A,Mat B,Mat X)
3471: {
3483: if (!A->rmap->N && !A->cmap->N) return 0;
3485: MatCheckPreallocated(A,1);
3487: PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3488: if (!A->ops->matsolvetranspose) {
3489: PetscInfo(A,"Mat type %s using basic MatMatSolveTranspose\n",((PetscObject)A)->type_name);
3490: MatMatSolve_Basic(A,B,X,PETSC_TRUE);
3491: } else {
3492: (*A->ops->matsolvetranspose)(A,B,X);
3493: }
3494: PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3495: PetscObjectStateIncrease((PetscObject)X);
3496: return 0;
3497: }
3499: /*@
3500: MatMatTransposeSolve - Solves A X = B^T, given a factored matrix.
3502: Neighbor-wise Collective on Mat
3504: Input Parameters:
3505: + A - the factored matrix
3506: - Bt - the transpose of right-hand-side matrix
3508: Output Parameter:
3509: . X - the result matrix (dense matrix)
3511: Notes:
3512: Most users should usually employ the simplified KSP interface for linear solvers
3513: instead of working directly with matrix algebra routines such as this.
3514: See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3515: at a time.
3517: For MUMPS, only a centralized sparse compressed column format is supported for the right-hand-side matrix on the host process. The user must create B^T in sparse compressed row format on the host process and call MatMatTransposeSolve() to implement MUMPS' MatMatSolve().
3519: Level: developer
3521: .seealso: MatMatSolve(), MatMatSolveTranspose(), MatLUFactor(), MatCholeskyFactor()
3522: @*/
3523: PetscErrorCode MatMatTransposeSolve(Mat A,Mat Bt,Mat X)
3524: {
3536: if (!A->rmap->N && !A->cmap->N) return 0;
3538: MatCheckPreallocated(A,1);
3541: PetscLogEventBegin(MAT_MatTrSolve,A,Bt,X,0);
3542: (*A->ops->mattransposesolve)(A,Bt,X);
3543: PetscLogEventEnd(MAT_MatTrSolve,A,Bt,X,0);
3544: PetscObjectStateIncrease((PetscObject)X);
3545: return 0;
3546: }
3548: /*@
3549: MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3550: U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.
3552: Neighbor-wise Collective on Mat
3554: Input Parameters:
3555: + mat - the factored matrix
3556: - b - the right-hand-side vector
3558: Output Parameter:
3559: . x - the result vector
3561: Notes:
3562: MatSolve() should be used for most applications, as it performs
3563: a forward solve followed by a backward solve.
3565: The vectors b and x cannot be the same, i.e., one cannot
3566: call MatForwardSolve(A,x,x).
3568: For matrix in seqsbaij format with block size larger than 1,
3569: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3570: MatForwardSolve() solves U^T*D y = b, and
3571: MatBackwardSolve() solves U x = y.
3572: Thus they do not provide a symmetric preconditioner.
3574: Most users should employ the simplified KSP interface for linear solvers
3575: instead of working directly with matrix algebra routines such as this.
3576: See, e.g., KSPCreate().
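   For illustration, for an LU-factored matrix F the two half solves chained together reproduce MatSolve()
   (F, b, y, and x are placeholders for the factored matrix and compatible vectors):
.vb
   MatForwardSolve(F,b,y);    /* solves L y = b */
   MatBackwardSolve(F,y,x);   /* solves U x = y, so x solves A x = b */
.ve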
3578: Level: developer
3580: .seealso: MatSolve(), MatBackwardSolve()
3581: @*/
3582: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3583: {
3594: if (!mat->rmap->N && !mat->cmap->N) return 0;
3595: MatCheckPreallocated(mat,1);
3598: PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3599: (*mat->ops->forwardsolve)(mat,b,x);
3600: PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3601: PetscObjectStateIncrease((PetscObject)x);
3602: return 0;
3603: }
3605: /*@
3606: MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3607: D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.
3609: Neighbor-wise Collective on Mat
3611: Input Parameters:
3612: + mat - the factored matrix
3613: - b - the right-hand-side vector
3615: Output Parameter:
3616: . x - the result vector
3618: Notes:
3619: MatSolve() should be used for most applications, as it performs
3620: a forward solve followed by a backward solve.
3622: The vectors b and x cannot be the same. I.e., one cannot
3623: call MatBackwardSolve(A,x,x).
3625: For matrix in seqsbaij format with block size larger than 1,
3626: the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3627: MatForwardSolve() solves U^T*D y = b, and
3628: MatBackwardSolve() solves U x = y.
3629: Thus they do not provide a symmetric preconditioner.
3631: Most users should employ the simplified KSP interface for linear solvers
3632: instead of working directly with matrix algebra routines such as this.
3633: See, e.g., KSPCreate().
3635: Level: developer
3637: .seealso: MatSolve(), MatForwardSolve()
3638: @*/
3639: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3640: {
3651: if (!mat->rmap->N && !mat->cmap->N) return 0;
3652: MatCheckPreallocated(mat,1);
3655: PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3656: (*mat->ops->backwardsolve)(mat,b,x);
3657: PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3658: PetscObjectStateIncrease((PetscObject)x);
3659: return 0;
3660: }
3662: /*@
3663: MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.
3665: Neighbor-wise Collective on Mat
3667: Input Parameters:
3668: + mat - the factored matrix
3669: . b - the right-hand-side vector
3670: - y - the vector to be added to
3672: Output Parameter:
3673: . x - the result vector
3675: Notes:
3676: The vectors b and x cannot be the same. I.e., one cannot
3677: call MatSolveAdd(A,x,y,x).
3679: Most users should employ the simplified KSP interface for linear solvers
3680: instead of working directly with matrix algebra routines such as this.
3681: See, e.g., KSPCreate().
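Example usage (a minimal sketch; assumes mat is a factored matrix, e.g. obtained with MatGetFactor() and MatLUFactorSymbolic()/MatLUFactorNumeric(), and omits error checking):
.vb
  Vec b,x,y;
  MatCreateVecs(mat,&x,&b);
  VecDuplicate(x,&y);
  VecSet(b,1.0);
  VecSet(y,2.0);
  MatSolveAdd(mat,b,y,x);   /* x = y + inv(A) b */
  VecDestroy(&y); VecDestroy(&x); VecDestroy(&b);
.ve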
3683: Level: developer
3685: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3686: @*/
3687: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3688: {
3689: PetscScalar one = 1.0;
3690: Vec tmp;
3706: if (!mat->rmap->N && !mat->cmap->N) return 0;
3707: MatCheckPreallocated(mat,1);
3709: PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3710: if (mat->factorerrortype) {
3712: PetscInfo(mat,"MatFactorError %d\n",mat->factorerrortype);
3713: VecSetInf(x);
3714: } else if (mat->ops->solveadd) {
3715: (*mat->ops->solveadd)(mat,b,y,x);
3716: } else {
3717: /* do the solve then the add manually */
3718: if (x != y) {
3719: MatSolve(mat,b,x);
3720: VecAXPY(x,one,y);
3721: } else {
3722: VecDuplicate(x,&tmp);
3723: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3724: VecCopy(x,tmp);
3725: MatSolve(mat,b,x);
3726: VecAXPY(x,one,tmp);
3727: VecDestroy(&tmp);
3728: }
3729: }
3730: PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3731: PetscObjectStateIncrease((PetscObject)x);
3732: return 0;
3733: }
3735: /*@
3736: MatSolveTranspose - Solves A' x = b, given a factored matrix.
3738: Neighbor-wise Collective on Mat
3740: Input Parameters:
3741: + mat - the factored matrix
3742: - b - the right-hand-side vector
3744: Output Parameter:
3745: . x - the result vector
3747: Notes:
3748: The vectors b and x cannot be the same. I.e., one cannot
3749: call MatSolveTranspose(A,x,x).
3751: Most users should employ the simplified KSP interface for linear solvers
3752: instead of working directly with matrix algebra routines such as this.
3753: See, e.g., KSPCreate().
3755: Level: developer
3757: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3758: @*/
3759: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3760: {
3761: PetscErrorCode (*f)(Mat,Vec,Vec) = (!mat->ops->solvetranspose && mat->symmetric) ? mat->ops->solve : mat->ops->solvetranspose;
3772: if (!mat->rmap->N && !mat->cmap->N) return 0;
3773: MatCheckPreallocated(mat,1);
3774: PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3775: if (mat->factorerrortype) {
3776: PetscInfo(mat,"MatFactorError %d\n",mat->factorerrortype);
3777: VecSetInf(x);
3778: } else {
3780: (*f)(mat,b,x);
3781: }
3782: PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3783: PetscObjectStateIncrease((PetscObject)x);
3784: return 0;
3785: }
3787: /*@
3788: MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3789: factored matrix.
3791: Neighbor-wise Collective on Mat
3793: Input Parameters:
3794: + mat - the factored matrix
3795: . b - the right-hand-side vector
3796: - y - the vector to be added to
3798: Output Parameter:
3799: . x - the result vector
3801: Notes:
3802: The vectors b and x cannot be the same. I.e., one cannot
3803: call MatSolveTransposeAdd(A,x,y,x).
3805: Most users should employ the simplified KSP interface for linear solvers
3806: instead of working directly with matrix algebra routines such as this.
3807: See, e.g., KSPCreate().
3809: Level: developer
3811: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3812: @*/
3813: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3814: {
3815: PetscScalar one = 1.0;
3816: Vec tmp;
3817: PetscErrorCode (*f)(Mat,Vec,Vec,Vec) = (!mat->ops->solvetransposeadd && mat->symmetric) ? mat->ops->solveadd : mat->ops->solvetransposeadd;
3832: if (!mat->rmap->N && !mat->cmap->N) return 0;
3833: MatCheckPreallocated(mat,1);
3835: PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3836: if (mat->factorerrortype) {
3837: PetscInfo(mat,"MatFactorError %d\n",mat->factorerrortype);
3838: VecSetInf(x);
3839: } else if (f) {
3840: (*f)(mat,b,y,x);
3841: } else {
3842: /* do the solve then the add manually */
3843: if (x != y) {
3844: MatSolveTranspose(mat,b,x);
3845: VecAXPY(x,one,y);
3846: } else {
3847: VecDuplicate(x,&tmp);
3848: PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3849: VecCopy(x,tmp);
3850: MatSolveTranspose(mat,b,x);
3851: VecAXPY(x,one,tmp);
3852: VecDestroy(&tmp);
3853: }
3854: }
3855: PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3856: PetscObjectStateIncrease((PetscObject)x);
3857: return 0;
3858: }
3859: /* ----------------------------------------------------------------*/
3861: /*@
3862: MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.
3864: Neighbor-wise Collective on Mat
3866: Input Parameters:
3867: + mat - the matrix
3868: . b - the right hand side
3869: . omega - the relaxation factor
3870: . flag - flag indicating the type of SOR (see below)
3871: . shift - diagonal shift
3872: . its - the number of iterations
3873: - lits - the number of local iterations
3875: Output Parameter:
3876: . x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)
3878: SOR Flags:
3879: + SOR_FORWARD_SWEEP - forward SOR
3880: . SOR_BACKWARD_SWEEP - backward SOR
3881: . SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3882: . SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3883: . SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3884: . SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3885: . SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3886: upper/lower triangular part of matrix to
3887: vector (with omega)
3888: - SOR_ZERO_INITIAL_GUESS - zero initial guess
3890: Notes:
3891: SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3892: SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3893: on each processor.
3895: Application programmers will not generally use MatSOR() directly,
3896: but instead will employ the KSP/PC interface.
3898: Note:
3899: For BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing; otherwise it does a pointwise smoothing
3901: Notes for Advanced Users:
3902: The flags are implemented as bitwise inclusive or operations.
3903: For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3904: to specify a zero initial guess for SSOR.
3906: Most users should employ the simplified KSP interface for linear solvers
3907: instead of working directly with matrix algebra routines such as this.
3908: See, e.g., KSPCreate().
3910: Vectors x and b CANNOT be the same
3912: Developer Note: We should add block SOR support for AIJ matrices with block size set to greater than one and no inodes
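Example usage (a minimal sketch of two symmetric sweeps with a zero initial guess; assumes mat is an assembled AIJ matrix and b, x are compatible, distinct vectors, and omits error checking):
.vb
  PetscReal omega = 1.0, shift = 0.0;
  PetscInt  its = 2, lits = 1;
  MatSOR(mat,b,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP),shift,its,lits,x);
.ve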
3914: Level: developer
3916: @*/
3917: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3918: {
3935: MatCheckPreallocated(mat,1);
3936: PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3937: (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3938: PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3939: PetscObjectStateIncrease((PetscObject)x);
3940: return 0;
3941: }
3943: /*
3944: Default matrix copy routine.
3945: */
3946: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3947: {
3948: PetscInt i,rstart = 0,rend = 0,nz;
3949: const PetscInt *cwork;
3950: const PetscScalar *vwork;
3952: if (B->assembled) {
3953: MatZeroEntries(B);
3954: }
3955: if (str == SAME_NONZERO_PATTERN) {
3956: MatGetOwnershipRange(A,&rstart,&rend);
3957: for (i=rstart; i<rend; i++) {
3958: MatGetRow(A,i,&nz,&cwork,&vwork);
3959: MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3960: MatRestoreRow(A,i,&nz,&cwork,&vwork);
3961: }
3962: } else {
3963: MatAYPX(B,0.0,A,str);
3964: }
3965: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3966: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3967: return 0;
3968: }
3970: /*@
3971: MatCopy - Copies a matrix to another matrix.
3973: Collective on Mat
3975: Input Parameters:
3976: + A - the matrix
3977: - str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN
3979: Output Parameter:
3980: . B - where the copy is put
3982: Notes:
3983: If you use SAME_NONZERO_PATTERN then the two matrices must have the same nonzero pattern or the routine will crash.
3985: MatCopy() copies the matrix entries of a matrix to another existing
3986: matrix (after first zeroing the second matrix). A related routine is
3987: MatConvert(), which first creates a new matrix and then copies the data.
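Example usage (a minimal sketch; assumes A is assembled and omits error checking):
.vb
  Mat B;
  MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);   /* B gets A's layout and nonzero pattern */
  MatCopy(A,B,SAME_NONZERO_PATTERN);           /* B now holds the same entries as A     */
  MatDestroy(&B);
.ve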
3989: Level: intermediate
3991: .seealso: MatConvert(), MatDuplicate()
3992: @*/
3993: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3994: {
3995: PetscInt i;
4002: MatCheckPreallocated(B,2);
4006: MatCheckPreallocated(A,1);
4007: if (A == B) return 0;
4009: PetscLogEventBegin(MAT_Copy,A,B,0,0);
4010: if (A->ops->copy) {
4011: (*A->ops->copy)(A,B,str);
4012: } else { /* generic conversion */
4013: MatCopy_Basic(A,B,str);
4014: }
4016: B->stencil.dim = A->stencil.dim;
4017: B->stencil.noc = A->stencil.noc;
4018: for (i=0; i<=A->stencil.dim; i++) {
4019: B->stencil.dims[i] = A->stencil.dims[i];
4020: B->stencil.starts[i] = A->stencil.starts[i];
4021: }
4023: PetscLogEventEnd(MAT_Copy,A,B,0,0);
4024: PetscObjectStateIncrease((PetscObject)B);
4025: return 0;
4026: }
4028: /*@C
4029: MatConvert - Converts a matrix to another matrix, either of the same
4030: or different type.
4032: Collective on Mat
4034: Input Parameters:
4035: + mat - the matrix
4036: . newtype - new matrix type. Use MATSAME to create a new matrix of the
4037: same type as the original matrix.
4038: - reuse - denotes if the destination matrix is to be created or reused.
4039: Use MAT_INPLACE_MATRIX for in-place conversion (that is, when you want the input mat to be changed to contain the matrix in the new format); otherwise use
4040: MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX (the latter can only be used after a first call with MAT_INITIAL_MATRIX and causes the matrix space in M to be reused).
4042: Output Parameter:
4043: . M - pointer to place new matrix
4045: Notes:
4046: MatConvert() first creates a new matrix and then copies the data from
4047: the first matrix. A related routine is MatCopy(), which copies the matrix
4048: entries of one matrix to another already existing matrix context.
4050: Cannot be used to convert a sequential matrix to a parallel one or a parallel matrix to a sequential one;
4051: the MPI communicator of the generated matrix is always the same as the communicator
4052: of the input matrix.
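Example usage (a minimal sketch converting an assembled matrix A to dense storage; omits error checking):
.vb
  Mat Adense;
  MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&Adense);
  /* ... after the entries of A change (same nonzero pattern) ... */
  MatConvert(A,MATDENSE,MAT_REUSE_MATRIX,&Adense);   /* refill the existing Adense */
  MatDestroy(&Adense);
.ve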
4054: Level: intermediate
4056: .seealso: MatCopy(), MatDuplicate()
4057: @*/
4058: PetscErrorCode MatConvert(Mat mat,MatType newtype,MatReuse reuse,Mat *M)
4059: {
4060: PetscBool sametype,issame,flg,issymmetric,ishermitian;
4061: char convname[256],mtype[256];
4062: Mat B;
4069: MatCheckPreallocated(mat,1);
4071: PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,sizeof(mtype),&flg);
4072: if (flg) newtype = mtype;
4074: PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
4075: PetscStrcmp(newtype,"same",&issame);
4079: if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) {
4080: PetscInfo(mat,"Early return for inplace %s %d %d\n",((PetscObject)mat)->type_name,sametype,issame);
4081: return 0;
4082: }
4084: /* Cache Mat options because some converter use MatHeaderReplace */
4085: issymmetric = mat->symmetric;
4086: ishermitian = mat->hermitian;
4088: if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
4089: PetscInfo(mat,"Calling duplicate for initial matrix %s %d %d\n",((PetscObject)mat)->type_name,sametype,issame);
4090: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4091: } else {
4092: PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
4093: const char *prefix[3] = {"seq","mpi",""};
4094: PetscInt i;
4095: /*
4096: Order of precedence:
4097: 0) See if newtype is a superclass of the current matrix.
4098: 1) See if a specialized converter is known to the current matrix.
4099: 2) See if a specialized converter is known to the desired matrix class.
4100: 3) See if a good general converter is registered for the desired class
4101: (as of 6/27/03 only MATMPIADJ falls into this category).
4102: 4) See if a good general converter is known for the current matrix.
4103: 5) Use a really basic converter.
4104: */
4106: /* 0) See if newtype is a superclass of the current matrix.
4107: i.e., mat is mpiaij and newtype is aij */
4108: for (i=0; i<2; i++) {
4109: PetscStrncpy(convname,prefix[i],sizeof(convname));
4110: PetscStrlcat(convname,newtype,sizeof(convname));
4111: PetscStrcmp(convname,((PetscObject)mat)->type_name,&flg);
4112: PetscInfo(mat,"Check superclass %s %s -> %d\n",convname,((PetscObject)mat)->type_name,flg);
4113: if (flg) {
4114: if (reuse == MAT_INPLACE_MATRIX) {
4115: PetscInfo(mat,"Early return\n");
4116: return 0;
4117: } else if (reuse == MAT_INITIAL_MATRIX && mat->ops->duplicate) {
4118: PetscInfo(mat,"Calling MatDuplicate\n");
4119: (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
4120: return 0;
4121: } else if (reuse == MAT_REUSE_MATRIX && mat->ops->copy) {
4122: PetscInfo(mat,"Calling MatCopy\n");
4123: MatCopy(mat,*M,SAME_NONZERO_PATTERN);
4124: return 0;
4125: }
4126: }
4127: }
4128: /* 1) See if a specialized converter is known to the current matrix and the desired class */
4129: for (i=0; i<3; i++) {
4130: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4131: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4132: PetscStrlcat(convname,"_",sizeof(convname));
4133: PetscStrlcat(convname,prefix[i],sizeof(convname));
4134: PetscStrlcat(convname,issame ? ((PetscObject)mat)->type_name : newtype,sizeof(convname));
4135: PetscStrlcat(convname,"_C",sizeof(convname));
4136: PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
4137: PetscInfo(mat,"Check specialized (1) %s (%s) -> %d\n",convname,((PetscObject)mat)->type_name,!!conv);
4138: if (conv) goto foundconv;
4139: }
4141: /* 2) See if a specialized converter is known to the desired matrix class. */
4142: MatCreate(PetscObjectComm((PetscObject)mat),&B);
4143: MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
4144: MatSetType(B,newtype);
4145: for (i=0; i<3; i++) {
4146: PetscStrncpy(convname,"MatConvert_",sizeof(convname));
4147: PetscStrlcat(convname,((PetscObject)mat)->type_name,sizeof(convname));
4148: PetscStrlcat(convname,"_",sizeof(convname));
4149: PetscStrlcat(convname,prefix[i],sizeof(convname));
4150: PetscStrlcat(convname,newtype,sizeof(convname));
4151: PetscStrlcat(convname,"_C",sizeof(convname));
4152: PetscObjectQueryFunction((PetscObject)B,convname,&conv);
4153: PetscInfo(mat,"Check specialized (2) %s (%s) -> %d\n",convname,((PetscObject)B)->type_name,!!conv);
4154: if (conv) {
4155: MatDestroy(&B);
4156: goto foundconv;
4157: }
4158: }
4160: /* 3) See if a good general converter is registered for the desired class */
4161: conv = B->ops->convertfrom;
4162: PetscInfo(mat,"Check convertfrom (%s) -> %d\n",((PetscObject)B)->type_name,!!conv);
4163: MatDestroy(&B);
4164: if (conv) goto foundconv;
4166: /* 4) See if a good general converter is known for the current matrix */
4167: if (mat->ops->convert) conv = mat->ops->convert;
4168: PetscInfo(mat,"Check general convert (%s) -> %d\n",((PetscObject)mat)->type_name,!!conv);
4169: if (conv) goto foundconv;
4171: /* 5) Use a really basic converter. */
4172: PetscInfo(mat,"Using MatConvert_Basic\n");
4173: conv = MatConvert_Basic;
4175: foundconv:
4176: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4177: (*conv)(mat,newtype,reuse,M);
4178: if (mat->rmap->mapping && mat->cmap->mapping && !(*M)->rmap->mapping && !(*M)->cmap->mapping) {
4179: /* the block sizes must be same if the mappings are copied over */
4180: (*M)->rmap->bs = mat->rmap->bs;
4181: (*M)->cmap->bs = mat->cmap->bs;
4182: PetscObjectReference((PetscObject)mat->rmap->mapping);
4183: PetscObjectReference((PetscObject)mat->cmap->mapping);
4184: (*M)->rmap->mapping = mat->rmap->mapping;
4185: (*M)->cmap->mapping = mat->cmap->mapping;
4186: }
4187: (*M)->stencil.dim = mat->stencil.dim;
4188: (*M)->stencil.noc = mat->stencil.noc;
4189: for (i=0; i<=mat->stencil.dim; i++) {
4190: (*M)->stencil.dims[i] = mat->stencil.dims[i];
4191: (*M)->stencil.starts[i] = mat->stencil.starts[i];
4192: }
4193: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4194: }
4195: PetscObjectStateIncrease((PetscObject)*M);
4197: /* Copy Mat options */
4198: if (issymmetric) {
4199: MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);
4200: }
4201: if (ishermitian) {
4202: MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);
4203: }
4204: return 0;
4205: }
4207: /*@C
4208: MatFactorGetSolverType - Returns name of the package providing the factorization routines
4210: Not Collective
4212: Input Parameter:
4213: . mat - the matrix, must be a factored matrix
4215: Output Parameter:
4216: . type - the string name of the package (do not free this string)
4218: Notes:
4219: In Fortran you pass in an empty string and the package name will be copied into it
4220: (make sure the string is long enough).
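Example usage (a minimal sketch; assumes F is a factored matrix obtained with MatGetFactor() and omits error checking):
.vb
  MatSolverType stype;
  MatFactorGetSolverType(F,&stype);
  PetscPrintf(PETSC_COMM_WORLD,"Factorization provided by %s\n",stype);
.ve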
4222: Level: intermediate
4224: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4225: @*/
4226: PetscErrorCode MatFactorGetSolverType(Mat mat, MatSolverType *type)
4227: {
4228: PetscErrorCode (*conv)(Mat,MatSolverType*);
4234: PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverType_C",&conv);
4235: if (conv) (*conv)(mat,type);
4236: else *type = MATSOLVERPETSC;
4237: return 0;
4238: }
4240: typedef struct _MatSolverTypeForSpecifcType* MatSolverTypeForSpecifcType;
4241: struct _MatSolverTypeForSpecifcType {
4242: MatType mtype;
4243: /* no entry for MAT_FACTOR_NONE */
4244: PetscErrorCode (*createfactor[MAT_FACTOR_NUM_TYPES-1])(Mat,MatFactorType,Mat*);
4245: MatSolverTypeForSpecifcType next;
4246: };
4248: typedef struct _MatSolverTypeHolder* MatSolverTypeHolder;
4249: struct _MatSolverTypeHolder {
4250: char *name;
4251: MatSolverTypeForSpecifcType handlers;
4252: MatSolverTypeHolder next;
4253: };
4255: static MatSolverTypeHolder MatSolverTypeHolders = NULL;
4257: /*@C
4258: MatSolverTypeRegister - Registers a MatSolverType that works for a particular matrix type
4260: Input Parameters:
4261: + package - name of the package, for example petsc or superlu
4262: . mtype - the matrix type that works with this package
4263: . ftype - the type of factorization supported by the package
4264: - createfactor - routine that will create the factored matrix ready to be used
4266: Level: intermediate
4268: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4269: @*/
4270: PetscErrorCode MatSolverTypeRegister(MatSolverType package,MatType mtype,MatFactorType ftype,PetscErrorCode (*createfactor)(Mat,MatFactorType,Mat*))
4271: {
4272: MatSolverTypeHolder next = MatSolverTypeHolders,prev = NULL;
4273: PetscBool flg;
4274: MatSolverTypeForSpecifcType inext,iprev = NULL;
4276: MatInitializePackage();
4277: if (!next) {
4278: PetscNew(&MatSolverTypeHolders);
4279: PetscStrallocpy(package,&MatSolverTypeHolders->name);
4280: PetscNew(&MatSolverTypeHolders->handlers);
4281: PetscStrallocpy(mtype,(char **)&MatSolverTypeHolders->handlers->mtype);
4282: MatSolverTypeHolders->handlers->createfactor[(int)ftype-1] = createfactor;
4283: return 0;
4284: }
4285: while (next) {
4286: PetscStrcasecmp(package,next->name,&flg);
4287: if (flg) {
4289: inext = next->handlers;
4290: while (inext) {
4291: PetscStrcasecmp(mtype,inext->mtype,&flg);
4292: if (flg) {
4293: inext->createfactor[(int)ftype-1] = createfactor;
4294: return 0;
4295: }
4296: iprev = inext;
4297: inext = inext->next;
4298: }
4299: PetscNew(&iprev->next);
4300: PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4301: iprev->next->createfactor[(int)ftype-1] = createfactor;
4302: return 0;
4303: }
4304: prev = next;
4305: next = next->next;
4306: }
4307: PetscNew(&prev->next);
4308: PetscStrallocpy(package,&prev->next->name);
4309: PetscNew(&prev->next->handlers);
4310: PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4311: prev->next->handlers->createfactor[(int)ftype-1] = createfactor;
4312: return 0;
4313: }
4315: /*@C
4316: MatSolverTypeGet - Gets the function that creates the factor matrix if it exists
4318: Input Parameters:
4319: + type - name of the package, for example petsc or superlu
4320: . ftype - the type of factorization supported by the type
4321: - mtype - the matrix type that works with this type
4323: Output Parameters:
4324: + foundtype - PETSC_TRUE if the type was registered
4325: . foundmtype - PETSC_TRUE if the type supports the requested mtype
4326: - createfactor - routine that will create the factored matrix ready to be used or NULL if not found
4328: Level: intermediate
4330: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatSolverTypeRegister(), MatGetFactor()
4331: @*/
4332: PetscErrorCode MatSolverTypeGet(MatSolverType type,MatType mtype,MatFactorType ftype,PetscBool *foundtype,PetscBool *foundmtype,PetscErrorCode (**createfactor)(Mat,MatFactorType,Mat*))
4333: {
4334: MatSolverTypeHolder next = MatSolverTypeHolders;
4335: PetscBool flg;
4336: MatSolverTypeForSpecifcType inext;
4338: if (foundtype) *foundtype = PETSC_FALSE;
4339: if (foundmtype) *foundmtype = PETSC_FALSE;
4340: if (createfactor) *createfactor = NULL;
4342: if (type) {
4343: while (next) {
4344: PetscStrcasecmp(type,next->name,&flg);
4345: if (flg) {
4346: if (foundtype) *foundtype = PETSC_TRUE;
4347: inext = next->handlers;
4348: while (inext) {
4349: PetscStrbeginswith(mtype,inext->mtype,&flg);
4350: if (flg) {
4351: if (foundmtype) *foundmtype = PETSC_TRUE;
4352: if (createfactor) *createfactor = inext->createfactor[(int)ftype-1];
4353: return 0;
4354: }
4355: inext = inext->next;
4356: }
4357: }
4358: next = next->next;
4359: }
4360: } else {
4361: while (next) {
4362: inext = next->handlers;
4363: while (inext) {
4364: PetscStrcmp(mtype,inext->mtype,&flg);
4365: if (flg && inext->createfactor[(int)ftype-1]) {
4366: if (foundtype) *foundtype = PETSC_TRUE;
4367: if (foundmtype) *foundmtype = PETSC_TRUE;
4368: if (createfactor) *createfactor = inext->createfactor[(int)ftype-1];
4369: return 0;
4370: }
4371: inext = inext->next;
4372: }
4373: next = next->next;
4374: }
4375: /* try with base classes inext->mtype */
4376: next = MatSolverTypeHolders;
4377: while (next) {
4378: inext = next->handlers;
4379: while (inext) {
4380: PetscStrbeginswith(mtype,inext->mtype,&flg);
4381: if (flg && inext->createfactor[(int)ftype-1]) {
4382: if (foundtype) *foundtype = PETSC_TRUE;
4383: if (foundmtype) *foundmtype = PETSC_TRUE;
4384: if (createfactor) *createfactor = inext->createfactor[(int)ftype-1];
4385: return 0;
4386: }
4387: inext = inext->next;
4388: }
4389: next = next->next;
4390: }
4391: }
4392: return 0;
4393: }
4395: PetscErrorCode MatSolverTypeDestroy(void)
4396: {
4397: MatSolverTypeHolder next = MatSolverTypeHolders,prev;
4398: MatSolverTypeForSpecifcType inext,iprev;
4400: while (next) {
4401: PetscFree(next->name);
4402: inext = next->handlers;
4403: while (inext) {
4404: PetscFree(inext->mtype);
4405: iprev = inext;
4406: inext = inext->next;
4407: PetscFree(iprev);
4408: }
4409: prev = next;
4410: next = next->next;
4411: PetscFree(prev);
4412: }
4413: MatSolverTypeHolders = NULL;
4414: return 0;
4415: }
4417: /*@C
4418: MatFactorGetCanUseOrdering - Indicates if the factorization can use the ordering provided in MatLUFactorSymbolic(), MatCholeskyFactorSymbolic()
4420: Logically Collective on Mat
4422: Input Parameters:
4423: . mat - the matrix
4425: Output Parameters:
4426: . flg - PETSC_TRUE if the factorization uses the ordering provided
4428: Notes:
4429: Most internal PETSc factorizations use the ordering passed to the factorization routine but external
4430: packages do not, thus we want to skip generating the ordering when it is not needed or used.
4432: Level: developer
4434: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor(), MatLUFactorSymbolic(), MatCholeskyFactorSymbolic()
4435: @*/
4436: PetscErrorCode MatFactorGetCanUseOrdering(Mat mat, PetscBool *flg)
4437: {
4438: *flg = mat->canuseordering;
4439: return 0;
4440: }
4442: /*@C
4443: MatFactorGetPreferredOrdering - The preferred ordering for a particular matrix factor object
4445: Logically Collective on Mat
4447: Input Parameters:
4448: . mat - the matrix
4450: Output Parameters:
4451: . otype - the preferred type
4453: Level: developer
4455: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor(), MatLUFactorSymbolic(), MatCholeskyFactorSymbolic()
4456: @*/
4457: PetscErrorCode MatFactorGetPreferredOrdering(Mat mat, MatFactorType ftype, MatOrderingType *otype)
4458: {
4459: *otype = mat->preferredordering[ftype];
4461: return 0;
4462: }
4464: /*@C
4465: MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()
4467: Collective on Mat
4469: Input Parameters:
4470: + mat - the matrix
4471: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4472: - ftype - factor type, for example MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4474: Output Parameters:
4475: . f - the factor matrix used with MatXXFactorSymbolic() calls
4477: Notes:
4478: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4479: such as pastix, superlu, mumps etc.
4481: PETSc must have been configured (./configure) to use the external solver, using the option --download-package
4483: Developer Notes:
4484: This should actually be called MatCreateFactor() since it creates a new factor object
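Example usage (a minimal sketch of an LU factorization and solve with the built-in PETSc solver; assumes A is an assembled square sequential AIJ matrix and b, x are compatible vectors, and omits error checking):
.vb
  Mat           F;
  IS            rowperm,colperm;
  MatFactorInfo info;
  MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
  MatGetOrdering(A,MATORDERINGND,&rowperm,&colperm);
  MatFactorInfoInitialize(&info);
  MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
  MatLUFactorNumeric(F,A,&info);
  MatSolve(F,b,x);
  ISDestroy(&rowperm); ISDestroy(&colperm);
  MatDestroy(&F);
.ve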
4486: Level: intermediate
4488: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatFactorGetCanUseOrdering(), MatSolverTypeRegister()
4489: @*/
4490: PetscErrorCode MatGetFactor(Mat mat, MatSolverType type,MatFactorType ftype,Mat *f)
4491: {
4492: PetscBool foundtype,foundmtype;
4493: PetscErrorCode (*conv)(Mat,MatFactorType,Mat*);
4499: MatCheckPreallocated(mat,1);
4501: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,&foundtype,&foundmtype,&conv);
4502: if (!foundtype) {
4503: if (type) {
4504: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver type %s for factorization type %s and matrix type %s. Perhaps you must ./configure with --download-%s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name,type);
4505: } else {
4506: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver type for factorization type %s and matrix type %s.",MatFactorTypes[ftype],((PetscObject)mat)->type_name);
4507: }
4508: }
4512: (*conv)(mat,ftype,f);
4513: return 0;
4514: }
4516: /*@C
4517: MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular solver type and factor type
4519: Not Collective
4521: Input Parameters:
4522: + mat - the matrix
4523: . type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4524: - ftype - factor type, for example MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, or MAT_FACTOR_ILU
4526: Output Parameter:
4527: . flg - PETSC_TRUE if the factorization is available
4529: Notes:
4530: Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4531: such as pastix, superlu, mumps etc.
4533: PETSc must have been configured (./configure) to use the external solver, using the option --download-package
4535: Developer Notes:
4536: This should actually be called MatCreateFactorAvailable() since MatGetFactor() creates a new factor object
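Example usage (a minimal sketch that falls back to the built-in PETSc solver when MUMPS is not available; assumes A is an assembled AIJ matrix and omits error checking):
.vb
  PetscBool flg;
  Mat       F;
  MatGetFactorAvailable(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&flg);
  MatGetFactor(A,flg ? MATSOLVERMUMPS : MATSOLVERPETSC,MAT_FACTOR_LU,&F);
.ve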
4538: Level: intermediate
4540: .seealso: MatCopy(), MatDuplicate(), MatGetFactor(), MatSolverTypeRegister()
4541: @*/
4542: PetscErrorCode MatGetFactorAvailable(Mat mat, MatSolverType type,MatFactorType ftype,PetscBool *flg)
4543: {
4544: PetscErrorCode (*gconv)(Mat,MatFactorType,Mat*);
4551: MatCheckPreallocated(mat,1);
4553: MatSolverTypeGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4554: *flg = gconv ? PETSC_TRUE : PETSC_FALSE;
4555: return 0;
4556: }
4558: /*@
4559: MatDuplicate - Duplicates a matrix including the non-zero structure.
4561: Collective on Mat
4563: Input Parameters:
4564: + mat - the matrix
4565: - op - One of MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES, or MAT_SHARE_NONZERO_PATTERN.
4566: See the manual page for MatDuplicateOption for an explanation of these options.
4568: Output Parameter:
4569: . M - pointer to place new matrix
4571: Level: intermediate
4573: Notes:
4574: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.
4575: May be called with an unassembled input Mat if MAT_DO_NOT_COPY_VALUES is used, in which case the output Mat is unassembled as well.
4576: When the original mat is a product of a matrix operation, e.g., an output of MatMatMult() or MatCreateSubMatrix(), only the simple matrix data structure of mat is duplicated; the internal data structures created for the reuse of previous matrix operations are not duplicated. Users should not use MatDuplicate() to create a new matrix M if M is intended to be reused as the product of a matrix operation.
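Example usage (a minimal sketch; assumes A is assembled and omits error checking):
.vb
  Mat B;
  MatDuplicate(A,MAT_COPY_VALUES,&B);   /* B has the same nonzero pattern and values as A */
  MatScale(B,2.0);                      /* modifying B does not affect A                  */
  MatDestroy(&B);
.ve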
4578: .seealso: MatCopy(), MatConvert(), MatDuplicateOption
4579: @*/
4580: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4581: {
4582: Mat B;
4583: VecType vtype;
4584: PetscInt i;
4585: PetscObject dm;
4586: void (*viewf)(void);
4593: MatCheckPreallocated(mat,1);
4595: *M = NULL;
4597: PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4598: (*mat->ops->duplicate)(mat,op,M);
4599: PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4600: B = *M;
4602: MatGetOperation(mat,MATOP_VIEW,&viewf);
4603: if (viewf) {
4604: MatSetOperation(B,MATOP_VIEW,viewf);
4605: }
4606: MatGetVecType(mat,&vtype);
4607: MatSetVecType(B,vtype);
4609: B->stencil.dim = mat->stencil.dim;
4610: B->stencil.noc = mat->stencil.noc;
4611: for (i=0; i<=mat->stencil.dim; i++) {
4612: B->stencil.dims[i] = mat->stencil.dims[i];
4613: B->stencil.starts[i] = mat->stencil.starts[i];
4614: }
4616: B->nooffproczerorows = mat->nooffproczerorows;
4617: B->nooffprocentries = mat->nooffprocentries;
4619: PetscObjectQuery((PetscObject) mat, "__PETSc_dm", &dm);
4620: if (dm) {
4621: PetscObjectCompose((PetscObject) B, "__PETSc_dm", dm);
4622: }
4623: PetscObjectStateIncrease((PetscObject)B);
4624: return 0;
4625: }
4627: /*@
4628: MatGetDiagonal - Gets the diagonal of a matrix.
4630: Logically Collective on Mat
4632: Input Parameters:
4633: + mat - the matrix
4634: - v - the vector for storing the diagonal
4636: Output Parameter:
4637: . v - the diagonal of the matrix
4639: Level: intermediate
4641: Note:
4642: Currently only correct in parallel for square matrices.
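Example usage (a minimal sketch; assumes mat is an assembled square matrix and omits error checking):
.vb
  Vec diag;
  MatCreateVecs(mat,NULL,&diag);   /* vector with the row layout of mat */
  MatGetDiagonal(mat,diag);
  VecDestroy(&diag);
.ve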
4644: .seealso: MatGetRow(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs()
4645: @*/
4646: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4647: {
4653: MatCheckPreallocated(mat,1);
4655: (*mat->ops->getdiagonal)(mat,v);
4656: PetscObjectStateIncrease((PetscObject)v);
4657: return 0;
4658: }
4660: /*@C
4661: MatGetRowMin - Gets the minimum value (of the real part) of each
4662: row of the matrix
4664: Logically Collective on Mat
4666: Input Parameter:
4667: . mat - the matrix
4669: Output Parameters:
4670: + v - the vector for storing the minimums
4671: - idx - the indices of the column found for each row (optional)
4673: Level: intermediate
4675: Notes:
4676: The results of this call are the same as if one converted the matrix to dense format
4677: and found the minimum value in each row (i.e., the implicit zeros are counted as zeros).
4679: This code is only implemented for a couple of matrix formats.
4681: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(),
4682: MatGetRowMax()
4683: @*/
4684: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4685: {
4691: if (!mat->cmap->N) {
4692: VecSet(v,PETSC_MAX_REAL);
4693: if (idx) {
4694: PetscInt i,m = mat->rmap->n;
4695: for (i=0; i<m; i++) idx[i] = -1;
4696: }
4697: } else {
4699: MatCheckPreallocated(mat,1);
4700: }
4701: (*mat->ops->getrowmin)(mat,v,idx);
4702: PetscObjectStateIncrease((PetscObject)v);
4703: return 0;
4704: }
4706: /*@C
4707: MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4708: row of the matrix
4710: Logically Collective on Mat
4712: Input Parameter:
4713: . mat - the matrix
4715: Output Parameters:
4716: + v - the vector for storing the minimums
4717: - idx - the indices of the column found for each row (or NULL if not needed)
4719: Level: intermediate
4721: Notes:
4722: If a row is completely empty or has only 0.0 values, then the idx[] value for that
4723: row is 0 (the first column).
4725: This code is only implemented for a couple of matrix formats.
4727: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4728: @*/
4729: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4730: {
4737: if (!mat->cmap->N) {
4738: VecSet(v,0.0);
4739: if (idx) {
4740: PetscInt i,m = mat->rmap->n;
4741: for (i=0; i<m; i++) idx[i] = -1;
4742: }
4743: } else {
4745: MatCheckPreallocated(mat,1);
4746: if (idx) PetscArrayzero(idx,mat->rmap->n);
4747: (*mat->ops->getrowminabs)(mat,v,idx);
4748: }
4749: PetscObjectStateIncrease((PetscObject)v);
4750: return 0;
4751: }
4753: /*@C
4754: MatGetRowMax - Gets the maximum value (of the real part) of each
4755: row of the matrix
4757: Logically Collective on Mat
4759: Input Parameter:
4760: . mat - the matrix
4762: Output Parameters:
4763: + v - the vector for storing the maximums
4764: - idx - the indices of the column found for each row (optional)
4766: Level: intermediate
4768: Notes:
4769: The results of this call are the same as if one converted the matrix to dense format
4770: and found the maximum value in each row (i.e., the implicit zeros are counted as zeros).
4772: This code is only implemented for a couple of matrix formats.
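Example usage (a minimal sketch; assumes mat is assembled and omits error checking):
.vb
  Vec      v;
  PetscInt *idx,m;
  MatGetLocalSize(mat,&m,NULL);
  PetscMalloc1(m,&idx);
  MatCreateVecs(mat,NULL,&v);   /* v has the row layout of mat */
  MatGetRowMax(mat,v,idx);      /* idx[i] is the column of the maximum in local row i */
  PetscFree(idx);
  VecDestroy(&v);
.ve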
4774: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4775: @*/
4776: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4777: {
4783: if (!mat->cmap->N) {
4784: VecSet(v,PETSC_MIN_REAL);
4785: if (idx) {
4786: PetscInt i,m = mat->rmap->n;
4787: for (i=0; i<m; i++) idx[i] = -1;
4788: }
4789: } else {
4791: MatCheckPreallocated(mat,1);
4792: (*mat->ops->getrowmax)(mat,v,idx);
4793: }
4794: PetscObjectStateIncrease((PetscObject)v);
4795: return 0;
4796: }
4798: /*@C
4799: MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4800: row of the matrix
4802: Logically Collective on Mat
4804: Input Parameter:
4805: . mat - the matrix
4807: Output Parameters:
4808: + v - the vector for storing the maximums
4809: - idx - the indices of the column found for each row (or NULL if not needed)
4811: Level: intermediate
4813: Notes:
4814: If a row is completely empty or has only 0.0 values, then the idx[] value for that
4815: row is 0 (the first column).
4817: This code is only implemented for a couple of matrix formats.
4819: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4820: @*/
4821: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4822: {
4828: if (!mat->cmap->N) {
4829: VecSet(v,0.0);
4830: if (idx) {
4831: PetscInt i,m = mat->rmap->n;
4832: for (i=0; i<m; i++) idx[i] = -1;
4833: }
4834: } else {
4836: MatCheckPreallocated(mat,1);
4837: if (idx) PetscArrayzero(idx,mat->rmap->n);
4838: (*mat->ops->getrowmaxabs)(mat,v,idx);
4839: }
4840: PetscObjectStateIncrease((PetscObject)v);
4841: return 0;
4842: }
4844: /*@
4845: MatGetRowSum - Gets the sum of each row of the matrix
4847: Logically or Neighborhood Collective on Mat
4849: Input Parameters:
4850: . mat - the matrix
4852: Output Parameter:
4853: . v - the vector for storing the sum of rows
4855: Level: intermediate
4857: Notes:
4858: This code is slow since it is not currently specialized for different formats
4860: .seealso: MatGetDiagonal(), MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRowMax(), MatGetRowMin()
4861: @*/
4862: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4863: {
4864: Vec ones;
4870: MatCheckPreallocated(mat,1);
4871: MatCreateVecs(mat,&ones,NULL);
4872: VecSet(ones,1.);
4873: MatMult(mat,ones,v);
4874: VecDestroy(&ones);
4875: return 0;
4876: }
4878: /*@
4879: MatTranspose - Computes an in-place or out-of-place transpose of a matrix.
4881: Collective on Mat
4883: Input Parameters:
4884: + mat - the matrix to transpose
4885: - reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX
4887: Output Parameter:
4888: . B - the transpose
4890: Notes:
4891: If you use MAT_INPLACE_MATRIX then you must pass in &mat for B
4893: MAT_REUSE_MATRIX causes the B matrix from a previous call to this function with MAT_INITIAL_MATRIX to be used
4895: Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.
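Example usage (a minimal sketch; assumes A is assembled and omits error checking):
.vb
  Mat At;
  MatTranspose(A,MAT_INITIAL_MATRIX,&At);   /* create At = A^T */
  /* ... after the entries of A change (same nonzero pattern) ... */
  MatTranspose(A,MAT_REUSE_MATRIX,&At);     /* refill the existing At */
  MatDestroy(&At);
.ve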
4897: Level: intermediate
4899: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4900: @*/
4901: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4902: {
4910: MatCheckPreallocated(mat,1);
4912: PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4913: (*mat->ops->transpose)(mat,reuse,B);
4914: PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4915: if (B) PetscObjectStateIncrease((PetscObject)*B);
4916: return 0;
4917: }
4919: /*@
4920: MatIsTranspose - Test whether a matrix is another one's transpose,
4921: or its own, in which case it tests symmetry.
4923: Collective on Mat
4925: Input Parameters:
4926: + A - the matrix to test
4927: . B - the matrix to test against, this can equal the first parameter
- tol - tolerance, differences between entries smaller than this are counted as zero
4929: Output Parameters:
4930: . flg - the result
4932: Notes:
4933: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4934: has a running time of the order of the number of nonzeros; the parallel
4935: test involves parallel copies of the block-offdiagonal parts of the matrix.
4937: Level: intermediate
4939: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4940: @*/
4941: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
4942: {
4943: PetscErrorCode (*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
4948: PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4949: PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4950: *flg = PETSC_FALSE;
4951: if (f && g) {
4953: (*f)(A,B,tol,flg);
4954: } else {
4955: MatType mattype;
4957: MatGetType(f ? B : A,&mattype);
4958: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type %s does not support checking for transpose",mattype);
4959: }
4960: return 0;
4961: }
4963: /*@
4964: MatHermitianTranspose - Computes the in-place or out-of-place Hermitian (complex conjugate) transpose of a matrix.
4966: Collective on Mat
4968: Input Parameters:
4969: + mat - the matrix to transpose and complex conjugate
4970: - reuse - either MAT_INITIAL_MATRIX, MAT_REUSE_MATRIX, or MAT_INPLACE_MATRIX
4972: Output Parameter:
4973: . B - the Hermitian transpose
4975: Level: intermediate
4977: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4978: @*/
4979: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4980: {
4981: MatTranspose(mat,reuse,B);
4982: #if defined(PETSC_USE_COMPLEX)
4983: MatConjugate(*B);
4984: #endif
4985: return 0;
4986: }
4988: /*@
4989: MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.
4991: Collective on Mat
4993: Input Parameters:
4994: + A - the matrix to test
4995: . B - the matrix to test against, this can equal the first parameter
- tol - tolerance, differences between entries smaller than this are counted as zero
4997: Output Parameters:
4998: . flg - the result
5000: Notes:
5001: Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
5002: has a running time of the order of the number of nonzeros; the parallel
5003: test involves parallel copies of the block-offdiagonal parts of the matrix.
5005: Level: intermediate
5007: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
5008: @*/
5009: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool *flg)
5010: {
5011: PetscErrorCode (*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);
5016: PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
5017: PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
5018: if (f && g) {
5020: (*f)(A,B,tol,flg);
5021: }
5022: return 0;
5023: }
5025: /*@
5026: MatPermute - Creates a new matrix with rows and columns permuted from the
5027: original.
5029: Collective on Mat
5031: Input Parameters:
5032: + mat - the matrix to permute
5033: . row - row permutation, each processor supplies only the permutation for its rows
5034: - col - column permutation, each processor supplies only the permutation for its columns
5036: Output Parameters:
5037: . B - the permuted matrix
5039: Level: advanced
5041: Note:
5042: The index sets map from row/col of permuted matrix to row/col of original matrix.
5043: The index sets should be on the same communicator as Mat and have the same local sizes.
5045: Developer Note:
5046: If you want to implement MatPermute for a matrix type, and your approach doesn't
5047: exploit the fact that row and col are permutations, consider implementing the
5048: more general MatCreateSubMatrix() instead.
5050: .seealso: MatGetOrdering(), ISAllGather()
5052: @*/
5053: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
5054: {
5065: MatCheckPreallocated(mat,1);
5067: if (mat->ops->permute) {
5068: (*mat->ops->permute)(mat,row,col,B);
5069: PetscObjectStateIncrease((PetscObject)*B);
5070: } else {
5071: MatCreateSubMatrix(mat, row, col, MAT_INITIAL_MATRIX, B);
5072: }
5073: return 0;
5074: }
5076: /*@
5077: MatEqual - Compares two matrices.
5079: Collective on Mat
5081: Input Parameters:
5082: + A - the first matrix
5083: - B - the second matrix
5085: Output Parameter:
5086: . flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.
5088: Level: intermediate
5090: @*/
5091: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool *flg)
5092: {
5099: MatCheckPreallocated(A,1);
5100: MatCheckPreallocated(B,2);
5104: if (A->ops->equal && A->ops->equal == B->ops->equal) {
5105: (*A->ops->equal)(A,B,flg);
5106: } else {
5107: MatMultEqual(A,B,10,flg);
5108: }
5109: return 0;
5110: }
5112: /*@
5113: MatDiagonalScale - Scales a matrix on the left and right by diagonal
5114: matrices that are stored as vectors. Either of the two scaling
5115: matrices can be NULL.
5117: Collective on Mat
5119: Input Parameters:
5120: + mat - the matrix to be scaled
5121: . l - the left scaling vector (or NULL)
5122: - r - the right scaling vector (or NULL)
5124: Notes:
5125: MatDiagonalScale() computes A = LAR, where
5126: L is a diagonal matrix (stored as a vector) and R is a diagonal matrix (stored as a vector).
5127: L scales the rows of the matrix and R scales the columns.
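Example usage (a minimal sketch of symmetric diagonal scaling; assumes mat is an assembled square matrix with nonzero diagonal entries and omits error checking):
.vb
  Vec d;
  MatCreateVecs(mat,NULL,&d);
  MatGetDiagonal(mat,d);
  VecSqrtAbs(d);
  VecReciprocal(d);
  MatDiagonalScale(mat,d,d);   /* mat <- D^(-1/2) mat D^(-1/2) */
  VecDestroy(&d);
.ve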
5129: Level: intermediate
5131: .seealso: MatScale(), MatShift(), MatDiagonalSet()
5132: @*/
5133: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
5134: {
5141: MatCheckPreallocated(mat,1);
5142: if (!l && !r) return 0;
5145: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5146: (*mat->ops->diagonalscale)(mat,l,r);
5147: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5148: PetscObjectStateIncrease((PetscObject)mat);
5149: if (l != r && mat->symmetric) mat->symmetric = PETSC_FALSE;
5150: return 0;
5151: }
5153: /*@
5154: MatScale - Scales all elements of a matrix by a given number.
5156: Logically Collective on Mat
5158: Input Parameters:
5159: + mat - the matrix to be scaled
5160: - a - the scaling value
5162: Output Parameter:
5163: . mat - the scaled matrix
5165: Level: intermediate
5167: .seealso: MatDiagonalScale()
5168: @*/
5169: PetscErrorCode MatScale(Mat mat,PetscScalar a)
5170: {
5177: MatCheckPreallocated(mat,1);
5179: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
5180: if (a != (PetscScalar)1.0) {
5181: (*mat->ops->scale)(mat,a);
5182: PetscObjectStateIncrease((PetscObject)mat);
5183: }
5184: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
5185: return 0;
5186: }
5188: /*@
5189: MatNorm - Calculates various norms of a matrix.
5191: Collective on Mat
5193: Input Parameters:
5194: + mat - the matrix
5195: - type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY
5197: Output Parameter:
5198: . nrm - the resulting norm
5200: Level: intermediate
5202: @*/
5203: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5204: {
5212: MatCheckPreallocated(mat,1);
5214: (*mat->ops->norm)(mat,type,nrm);
5215: return 0;
5216: }
5218: /*
5219: This variable is used to prevent counting of MatAssemblyBegin() that
5220: are called from within a MatAssemblyEnd().
5221: */
5222: static PetscInt MatAssemblyEnd_InUse = 0;
5223: /*@
5224: MatAssemblyBegin - Begins assembling the matrix. This routine should
5225: be called after completing all calls to MatSetValues().
5227: Collective on Mat
5229: Input Parameters:
5230: + mat - the matrix
5231: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5233: Notes:
5234: MatSetValues() generally caches the values. The matrix is ready to
5235: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5236: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5237: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5238: using the matrix.
5240: ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5241: same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES; that is
5242: a global collective operation requiring all processes that share the matrix.
5244: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine are compressed
5245: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5246: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
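Example usage (a minimal sketch of the usual set-values/assemble cycle; assumes A has been created, sized, and preallocated with room for the diagonal, and omits error checking):
.vb
  PetscInt    i,rstart,rend;
  PetscScalar v = 1.0;
  MatGetOwnershipRange(A,&rstart,&rend);
  for (i=rstart; i<rend; i++) {
    MatSetValues(A,1,&i,1,&i,&v,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
.ve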
5248: Level: beginner
5250: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5251: @*/
5252: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5253: {
5256: MatCheckPreallocated(mat,1);
5258: if (mat->assembled) {
5259: mat->was_assembled = PETSC_TRUE;
5260: mat->assembled = PETSC_FALSE;
5261: }
5263: if (!MatAssemblyEnd_InUse) {
5264: PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5265: if (mat->ops->assemblybegin) (*mat->ops->assemblybegin)(mat,type);
5266: PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5267: } else if (mat->ops->assemblybegin) {
5268: (*mat->ops->assemblybegin)(mat,type);
5269: }
5270: return 0;
5271: }
5273: /*@
5274: MatAssembled - Indicates if a matrix has been assembled and is ready for
5275: use; for example, in matrix-vector product.
5277: Not Collective
5279: Input Parameter:
5280: . mat - the matrix
5282: Output Parameter:
5283: . assembled - PETSC_TRUE or PETSC_FALSE
5285: Level: advanced
5287: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5288: @*/
5289: PetscErrorCode MatAssembled(Mat mat,PetscBool *assembled)
5290: {
5293: *assembled = mat->assembled;
5294: return 0;
5295: }
5297: /*@
5298: MatAssemblyEnd - Completes assembling the matrix. This routine should
5299: be called after MatAssemblyBegin().
5301: Collective on Mat
5303: Input Parameters:
5304: + mat - the matrix
5305: - type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY
5307: Options Database Keys:
5308: + -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5309: . -mat_view ::ascii_info_detail - Prints more detailed info
5310: . -mat_view - Prints matrix in ASCII format
5311: . -mat_view ::ascii_matlab - Prints matrix in Matlab format
5312: . -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5313: . -display <name> - Sets display name (default is host)
5314: . -draw_pause <sec> - Sets number of seconds to pause after display
5315: . -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: ch_matlab)
5316: . -viewer_socket_machine <machine> - Machine to use for socket
5317: . -viewer_socket_port <port> - Port number to use for socket
5318: - -mat_view binary:filename[:append] - Save matrix to file in binary format
5320: Notes:
5321: MatSetValues() generally caches the values. The matrix is ready to
5322: use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5323: Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5324: in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5325: using the matrix.
5327: Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine are compressed
5328: out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5329: before MAT_FINAL_ASSEMBLY so the space is not compressed out.
5331: Level: beginner
5333: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5334: @*/
5335: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5336: {
5337: static PetscInt inassm = 0;
5338: PetscBool flg = PETSC_FALSE;
5343: inassm++;
5344: MatAssemblyEnd_InUse++;
5345: if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5346: PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5347: if (mat->ops->assemblyend) {
5348: (*mat->ops->assemblyend)(mat,type);
5349: }
5350: PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5351: } else if (mat->ops->assemblyend) {
5352: (*mat->ops->assemblyend)(mat,type);
5353: }
5355: /* Flush assembly is not a true assembly */
5356: if (type != MAT_FLUSH_ASSEMBLY) {
5357: mat->num_ass++;
5358: mat->assembled = PETSC_TRUE;
5359: mat->ass_nonzerostate = mat->nonzerostate;
5360: }
5362: mat->insertmode = NOT_SET_VALUES;
5363: MatAssemblyEnd_InUse--;
5364: PetscObjectStateIncrease((PetscObject)mat);
5365: if (!mat->symmetric_eternal) {
5366: mat->symmetric_set = PETSC_FALSE;
5367: mat->hermitian_set = PETSC_FALSE;
5368: mat->structurally_symmetric_set = PETSC_FALSE;
5369: }
5370: if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5371: MatViewFromOptions(mat,NULL,"-mat_view");
5373: if (mat->checksymmetryonassembly) {
5374: MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5375: if (flg) {
5376: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5377: } else {
5378: PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5379: }
5380: }
5381: if (mat->nullsp && mat->checknullspaceonassembly) {
5382: MatNullSpaceTest(mat->nullsp,mat,NULL);
5383: }
5384: }
5385: inassm--;
5386: return 0;
5387: }
5389: /*@
5390: MatSetOption - Sets a parameter option for a matrix. Some options
5391: may be specific to certain storage formats. Some options
5392: determine how values will be inserted (or added). Sorted,
5393: row-oriented input will generally assemble the fastest. The default
5394: is row-oriented.
5396: Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption
5398: Input Parameters:
5399: + mat - the matrix
5400: . option - the option, one of those listed below (and possibly others),
5401: - flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)
5403: Options Describing Matrix Structure:
5404: + MAT_SPD - symmetric positive definite
5405: . MAT_SYMMETRIC - symmetric in terms of both structure and value
5406: . MAT_HERMITIAN - transpose is the complex conjugation
5407: . MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5408: - MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5409: you set to be kept with all future use of the matrix
5410: including after MatAssemblyBegin/End() which could
5411: potentially change the symmetry structure, i.e. you
5412: KNOW the matrix will ALWAYS have the property you set.
5413: Note that setting this flag alone implies nothing about whether the matrix is symmetric/Hermitian;
5414: the relevant flags must be set independently.
5416: Options For Use with MatSetValues():
5417: Insert a logically dense subblock, which can be
5418: . MAT_ROW_ORIENTED - row-oriented (default)
5420: Note these options reflect the data you pass in with MatSetValues(); it has
5421: nothing to do with how the data is stored internally in the matrix
5422: data structure.
5424: When (re)assembling a matrix, we can restrict the input for
5425: efficiency/debugging purposes. These options include
5426: + MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5427: . MAT_FORCE_DIAGONAL_ENTRIES - forces diagonal entries to be allocated
5428: . MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5429: . MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5430: . MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5431: . MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5432: any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5433: performance for very large process counts.
5434: - MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5435: of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5436: functions, instead sending only neighbor messages.
5438: Notes:
5439: Except for MAT_UNUSED_NONZERO_LOCATION_ERR and MAT_ROW_ORIENTED all processes that share the matrix must pass the same value in flg!
5441: Some options are relevant only for particular matrix types and
5442: are thus ignored by others. Other options are not supported by
5443: certain matrix types and will generate an error message if set.
5445: If using a Fortran 77 module to compute a matrix, one may need to
5446: use the column-oriented option (or convert to the row-oriented
5447: format).
5449: MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5450: that would generate a new entry in the nonzero structure is instead
5451: ignored. Thus, if memory has not already been allocated for this particular
5452: data, then the insertion is ignored. For dense matrices, in which
5453: the entire array is allocated, no entries are ever ignored.
5454: Set after the first MatAssemblyEnd(). If this option is set then the MatAssemblyBegin/End() process requires one fewer global reduction
5456: MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5457: that would generate a new entry in the nonzero structure instead produces
5458: an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set then the MatAssemblyBegin/End() process requires one fewer global reduction
5460: MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5461: that would generate a new entry that has not been preallocated will
5462: instead produce an error. (Currently supported for AIJ and BAIJ formats
5463: only.) This is a useful flag when debugging matrix memory preallocation.
5464: If this option is set then the MatAssemblyBegin/End() process requires one fewer global reduction
5466: MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5467: other processors should be dropped, rather than stashed.
5468: This is useful if you know that the "owning" processor is also
5469: always generating the correct matrix entries, so that PETSc need
5470: not transfer duplicate entries generated on another processor.
5472: MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5473: searches during matrix assembly. When this flag is set, the hash table
5474: is created during the first Matrix Assembly. This hash table is
5475: used the next time through, during MatSetValues()/MatSetValuesBlocked()
5476: to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5477: should be used with MAT_USE_HASH_TABLE flag. This option is currently
5478: supported by MATMPIBAIJ format only.
5480: MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5481: are kept in the nonzero structure
5483: MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5484: a zero location in the matrix
5486: MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types
5488: MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5489: zero row routines and thus improves performance for very large process counts.
5491: MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5492: part of the matrix (since they should match the upper triangular part).
5494: MAT_SORTED_FULL - each process provides exactly its local rows; all column indices for a given row are passed in a
5495: single call to MatSetValues(), preallocation is perfect, row oriented, INSERT_VALUES is used. Common
5496: with finite difference schemes with non-periodic boundary conditions.
5498: Level: intermediate
5500: .seealso: MatOption, Mat
5502: @*/
5503: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5504: {
5506: if (op > 0) {
5509: }
5513: switch (op) {
5514: case MAT_FORCE_DIAGONAL_ENTRIES:
5515: mat->force_diagonals = flg;
5516: return 0;
5517: case MAT_NO_OFF_PROC_ENTRIES:
5518: mat->nooffprocentries = flg;
5519: return 0;
5520: case MAT_SUBSET_OFF_PROC_ENTRIES:
5521: mat->assembly_subset = flg;
5522: if (!mat->assembly_subset) { /* See the same logic in VecAssembly wrt VEC_SUBSET_OFF_PROC_ENTRIES */
5523: #if !defined(PETSC_HAVE_MPIUNI)
5524: MatStashScatterDestroy_BTS(&mat->stash);
5525: #endif
5526: mat->stash.first_assembly_done = PETSC_FALSE;
5527: }
5528: return 0;
5529: case MAT_NO_OFF_PROC_ZERO_ROWS:
5530: mat->nooffproczerorows = flg;
5531: return 0;
5532: case MAT_SPD:
5533: mat->spd_set = PETSC_TRUE;
5534: mat->spd = flg;
5535: if (flg) {
5536: mat->symmetric = PETSC_TRUE;
5537: mat->structurally_symmetric = PETSC_TRUE;
5538: mat->symmetric_set = PETSC_TRUE;
5539: mat->structurally_symmetric_set = PETSC_TRUE;
5540: }
5541: break;
5542: case MAT_SYMMETRIC:
5543: mat->symmetric = flg;
5544: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5545: mat->symmetric_set = PETSC_TRUE;
5546: mat->structurally_symmetric_set = flg;
5547: #if !defined(PETSC_USE_COMPLEX)
5548: mat->hermitian = flg;
5549: mat->hermitian_set = PETSC_TRUE;
5550: #endif
5551: break;
5552: case MAT_HERMITIAN:
5553: mat->hermitian = flg;
5554: if (flg) mat->structurally_symmetric = PETSC_TRUE;
5555: mat->hermitian_set = PETSC_TRUE;
5556: mat->structurally_symmetric_set = flg;
5557: #if !defined(PETSC_USE_COMPLEX)
5558: mat->symmetric = flg;
5559: mat->symmetric_set = PETSC_TRUE;
5560: #endif
5561: break;
5562: case MAT_STRUCTURALLY_SYMMETRIC:
5563: mat->structurally_symmetric = flg;
5564: mat->structurally_symmetric_set = PETSC_TRUE;
5565: break;
5566: case MAT_SYMMETRY_ETERNAL:
5567: mat->symmetric_eternal = flg;
5568: break;
5569: case MAT_STRUCTURE_ONLY:
5570: mat->structure_only = flg;
5571: break;
5572: case MAT_SORTED_FULL:
5573: mat->sortedfull = flg;
5574: break;
5575: default:
5576: break;
5577: }
5578: if (mat->ops->setoption) {
5579: (*mat->ops->setoption)(mat,op,flg);
5580: }
5581: return 0;
5582: }
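/*
   Illustrative usage sketch for MatSetOption() (not part of PETSc; the matrix A and its preallocation are assumed
   to have been created elsewhere, and error checking is omitted):

     Mat A;                                                       // assumed already created and preallocated
     MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);                    // promise the inserted values will be symmetric
     MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);   // error out if the preallocation is exceeded
     MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);          // each rank only sets values in its own rows
     // ... MatSetValues() calls ...
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/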
5584: /*@
5585: MatGetOption - Gets a parameter option that has been set for a matrix.
5587: Logically Collective on Mat for certain options, such as MAT_SPD; not collective for others, such as MAT_ROW_ORIENTED, see MatOption
5589: Input Parameters:
5590: + mat - the matrix
5591: - option - the option; only certain options are supported, check the source code for which ones
5593: Output Parameter:
5594: . flg - the value of the option (PETSC_TRUE or PETSC_FALSE)
5596: Notes:
5597: Can only be called after MatSetSizes() and MatSetType() have been called.
5599: Level: intermediate
5601: .seealso: MatOption, MatSetOption()
5603: @*/
5604: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5605: {
5612: switch (op) {
5613: case MAT_NO_OFF_PROC_ENTRIES:
5614: *flg = mat->nooffprocentries;
5615: break;
5616: case MAT_NO_OFF_PROC_ZERO_ROWS:
5617: *flg = mat->nooffproczerorows;
5618: break;
5619: case MAT_SYMMETRIC:
5620: *flg = mat->symmetric;
5621: break;
5622: case MAT_HERMITIAN:
5623: *flg = mat->hermitian;
5624: break;
5625: case MAT_STRUCTURALLY_SYMMETRIC:
5626: *flg = mat->structurally_symmetric;
5627: break;
5628: case MAT_SYMMETRY_ETERNAL:
5629: *flg = mat->symmetric_eternal;
5630: break;
5631: case MAT_SPD:
5632: *flg = mat->spd;
5633: break;
5634: default:
5635: break;
5636: }
5637: return 0;
5638: }
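/*
   Illustrative usage sketch for MatGetOption() (not part of PETSc; A is an assumed, already assembled matrix):

     PetscBool symm;
     MatGetOption(A,MAT_SYMMETRIC,&symm);
     if (symm) {
       // e.g. select a preconditioner that exploits symmetry
     }
*/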
5640: /*@
5641: MatZeroEntries - Zeros all entries of a matrix. For sparse matrices
5642: this routine retains the old nonzero structure.
5644: Logically Collective on Mat
5646: Input Parameters:
5647: . mat - the matrix
5649: Level: intermediate
5651: Notes:
5652: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5653: See the Performance chapter of the users manual for information on preallocating matrices.
5655: .seealso: MatZeroRows()
5656: @*/
5657: PetscErrorCode MatZeroEntries(Mat mat)
5658: {
5664: MatCheckPreallocated(mat,1);
5666: PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5667: (*mat->ops->zeroentries)(mat);
5668: PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5669: PetscObjectStateIncrease((PetscObject)mat);
5670: return 0;
5671: }
5673: /*@
5674: MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5675: of a set of rows and columns of a matrix.
5677: Collective on Mat
5679: Input Parameters:
5680: + mat - the matrix
5681: . numRows - the number of rows to remove
5682: . rows - the global row indices
5683: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5684: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5685: - b - optional vector of right hand side, that will be adjusted by provided solution
5687: Notes:
5688: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5690: The user can set a value in the diagonal entry (or for the AIJ and
5691: row formats can optionally remove the main diagonal entry from the
5692: nonzero structure as well, by passing 0.0 as the final argument).
5694: For the parallel case, all processes that share the matrix (i.e.,
5695: those in the communicator used for matrix creation) MUST call this
5696: routine, regardless of whether any rows being zeroed are owned by
5697: them.
5699: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5700: list only rows local to itself).
5702: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5704: Level: intermediate
5706: .seealso: MatZeroRowsIS(), MatZeroRows(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5707: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5708: @*/
5709: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5710: {
5717: MatCheckPreallocated(mat,1);
5719: (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5720: MatViewFromOptions(mat,NULL,"-mat_view");
5721: PetscObjectStateIncrease((PetscObject)mat);
5722: return 0;
5723: }
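/*
   Illustrative usage sketch for MatZeroRowsColumns() (not part of PETSc; A, x, b and the list of boundary rows
   are assumed/hypothetical, and error checking is omitted). A typical use is enforcing Dirichlet boundary
   conditions symmetrically, adjusting the right hand side with the known boundary values stored in x:

     PetscInt bcrows[] = {0, 5, 9};            // hypothetical global indices of constrained rows
     MatZeroRowsColumns(A,3,bcrows,1.0,x,b);   // zero the rows and columns, put 1.0 on the diagonal, fix up b
*/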
5725: /*@
5726: MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5727: of a set of rows and columns of a matrix.
5729: Collective on Mat
5731: Input Parameters:
5732: + mat - the matrix
5733: . is - the rows to zero
5734: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5735: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5736: - b - optional vector of right hand side, that will be adjusted by provided solution
5738: Notes:
5739: This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.
5741: The user can set a value in the diagonal entry (or for the AIJ and
5742: row formats can optionally remove the main diagonal entry from the
5743: nonzero structure as well, by passing 0.0 as the final argument).
5745: For the parallel case, all processes that share the matrix (i.e.,
5746: those in the communicator used for matrix creation) MUST call this
5747: routine, regardless of whether any rows being zeroed are owned by
5748: them.
5750: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5751: list only rows local to itself).
5753: The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.
5755: Level: intermediate
5757: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5758: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRows(), MatZeroRowsColumnsStencil()
5759: @*/
5760: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5761: {
5762: PetscInt numRows;
5763: const PetscInt *rows;
5769: ISGetLocalSize(is,&numRows);
5770: ISGetIndices(is,&rows);
5771: MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5772: ISRestoreIndices(is,&rows);
5773: return 0;
5774: }
5776: /*@
5777: MatZeroRows - Zeros all entries (except possibly the main diagonal)
5778: of a set of rows of a matrix.
5780: Collective on Mat
5782: Input Parameters:
5783: + mat - the matrix
5784: . numRows - the number of rows to remove
5785: . rows - the global row indices
5786: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5787: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5788: - b - optional vector of right hand side, that will be adjusted by provided solution
5790: Notes:
5791: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5792: but does not release memory. For the dense and block diagonal
5793: formats this does not alter the nonzero structure.
5795: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5796: of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5797: merely zeroed.
5799: The user can set a value in the diagonal entry (or for the AIJ and
5800: row formats can optionally remove the main diagonal entry from the
5801: nonzero structure as well, by passing 0.0 as the final argument).
5803: For the parallel case, all processes that share the matrix (i.e.,
5804: those in the communicator used for matrix creation) MUST call this
5805: routine, regardless of whether any rows being zeroed are owned by
5806: them.
5808: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5809: list only rows local to itself).
5811: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5812: owns that are to be zeroed. This saves a global synchronization in the implementation.
5814: Level: intermediate
5816: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5817: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5818: @*/
5819: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5820: {
5827: MatCheckPreallocated(mat,1);
5829: (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5830: MatViewFromOptions(mat,NULL,"-mat_view");
5831: PetscObjectStateIncrease((PetscObject)mat);
5832: return 0;
5833: }
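/*
   Illustrative usage sketch for MatZeroRows() (not part of PETSc; A is an assumed, assembled matrix and the row
   list is hypothetical). Keeping the nonzero pattern lets the same preallocation be reused on later assemblies:

     PetscInt rows[] = {2, 7};
     MatSetOption(A,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE);
     MatZeroRows(A,2,rows,1.0,NULL,NULL);      // zero the rows, place 1.0 on their diagonals
*/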
5835: /*@
5836: MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5837: of a set of rows of a matrix.
5839: Collective on Mat
5841: Input Parameters:
5842: + mat - the matrix
5843: . is - index set of rows to remove (if NULL then no row is removed)
5844: . diag - value put in all diagonals of eliminated rows
5845: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5846: - b - optional vector of right hand side, that will be adjusted by provided solution
5848: Notes:
5849: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5850: but does not release memory. For the dense and block diagonal
5851: formats this does not alter the nonzero structure.
5853: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5854: of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5855: merely zeroed.
5857: The user can set a value in the diagonal entry (or for the AIJ and
5858: row formats can optionally remove the main diagonal entry from the
5859: nonzero structure as well, by passing 0.0 as the final argument).
5861: For the parallel case, all processes that share the matrix (i.e.,
5862: those in the communicator used for matrix creation) MUST call this
5863: routine, regardless of whether any rows being zeroed are owned by
5864: them.
5866: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5867: list only rows local to itself).
5869: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5870: owns that are to be zeroed. This saves a global synchronization in the implementation.
5872: Level: intermediate
5874: .seealso: MatZeroRows(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5875: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5876: @*/
5877: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5878: {
5879: PetscInt numRows = 0;
5880: const PetscInt *rows = NULL;
5884: if (is) {
5886: ISGetLocalSize(is,&numRows);
5887: ISGetIndices(is,&rows);
5888: }
5889: MatZeroRows(mat,numRows,rows,diag,x,b);
5890: if (is) {
5891: ISRestoreIndices(is,&rows);
5892: }
5893: return 0;
5894: }
5896: /*@
5897: MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5898: of a set of rows of a matrix. These rows must be local to the process.
5900: Collective on Mat
5902: Input Parameters:
5903: + mat - the matrix
5904: . numRows - the number of rows to remove
5905: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
5906: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5907: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5908: - b - optional vector of right hand side, that will be adjusted by provided solution
5910: Notes:
5911: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5912: but does not release memory. For the dense and block diagonal
5913: formats this does not alter the nonzero structure.
5915: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5916: of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5917: merely zeroed.
5919: The user can set a value in the diagonal entry (or for the AIJ and
5920: row formats can optionally remove the main diagonal entry from the
5921: nonzero structure as well, by passing 0.0 as the final argument).
5923: For the parallel case, all processes that share the matrix (i.e.,
5924: those in the communicator used for matrix creation) MUST call this
5925: routine, regardless of whether any rows being zeroed are owned by
5926: them.
5928: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5929: list only rows local to itself).
5931: The grid coordinates are across the entire grid, not just the local portion
5933: In Fortran idxm and idxn should be declared as
5934: $ MatStencil idxm(4,m)
5935: and the values inserted using
5936: $ idxm(MatStencil_i,1) = i
5937: $ idxm(MatStencil_j,1) = j
5938: $ idxm(MatStencil_k,1) = k
5939: $ idxm(MatStencil_c,1) = c
5940: etc
5942: For periodic boundary conditions use negative indices for values to the left (below 0), which are
5943: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5944: etc., to obtain values obtained by wrapping the values from the left edge. This does not work for anything but the
5945: DM_BOUNDARY_PERIODIC boundary type.
5947: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5948: a single value per point) you can skip filling those indices.
5950: Level: intermediate
5952: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRows(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
5953: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
5954: @*/
5955: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5956: {
5957: PetscInt dim = mat->stencil.dim;
5958: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
5959: PetscInt *dims = mat->stencil.dims+1;
5960: PetscInt *starts = mat->stencil.starts;
5961: PetscInt *dxm = (PetscInt*) rows;
5962: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
5968: PetscMalloc1(numRows, &jdxm);
5969: for (i = 0; i < numRows; ++i) {
5970: /* Skip unused dimensions (they are ordered k, j, i, c) */
5971: for (j = 0; j < 3-sdim; ++j) dxm++;
5972: /* Local index in X dir */
5973: tmp = *dxm++ - starts[0];
5974: /* Loop over remaining dimensions */
5975: for (j = 0; j < dim-1; ++j) {
5976: /* If nonlocal, set index to be negative */
5977: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5978: /* Update local index */
5979: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5980: }
5981: /* Skip component slot if necessary */
5982: if (mat->stencil.noc) dxm++;
5983: /* Local row number */
5984: if (tmp >= 0) {
5985: jdxm[numNewRows++] = tmp;
5986: }
5987: }
5988: MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5989: PetscFree(jdxm);
5990: return 0;
5991: }
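/*
   Illustrative usage sketch for MatZeroRowsStencil() (not part of PETSc; the matrix A is assumed to come from a
   2d DMDA with a single degree of freedom per point, and the variables mx, xs, xm are assumed to come from the
   DMDA, e.g. DMDAGetInfo()/DMDAGetCorners(); error checking is omitted). Here the rows on the bottom boundary
   j = 0 are zeroed, assuming this process owns part of that boundary:

     MatStencil *brows;
     PetscInt   i,nb = 0;
     PetscMalloc1(mx,&brows);
     for (i=xs; i<xs+xm; i++) {
       brows[nb].i = i; brows[nb].j = 0; nb++;
     }
     MatZeroRowsStencil(A,nb,brows,1.0,NULL,NULL);
     PetscFree(brows);
*/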
5993: /*@
5994: MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5995: of a set of rows and columns of a matrix.
5997: Collective on Mat
5999: Input Parameters:
6000: + mat - the matrix
6001: . numRows - the number of rows/columns to remove
6002: . rows - the grid coordinates (and component number when dof > 1) for matrix rows
6003: . diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
6004: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6005: - b - optional vector of right hand side, that will be adjusted by provided solution
6007: Notes:
6008: For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
6009: but does not release memory. For the dense and block diagonal
6010: formats this does not alter the nonzero structure.
6012: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6013: of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6014: merely zeroed.
6016: The user can set a value in the diagonal entry (or for the AIJ and
6017: row formats can optionally remove the main diagonal entry from the
6018: nonzero structure as well, by passing 0.0 as the final argument).
6020: For the parallel case, all processes that share the matrix (i.e.,
6021: those in the communicator used for matrix creation) MUST call this
6022: routine, regardless of whether any rows being zeroed are owned by
6023: them.
6025: Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
6026: list only rows local to itself, but the row/column numbers are given in local numbering).
6028: The grid coordinates are across the entire grid, not just the local portion
6030: In Fortran idxm and idxn should be declared as
6031: $ MatStencil idxm(4,m)
6032: and the values inserted using
6033: $ idxm(MatStencil_i,1) = i
6034: $ idxm(MatStencil_j,1) = j
6035: $ idxm(MatStencil_k,1) = k
6036: $ idxm(MatStencil_c,1) = c
6037: etc
6039: For periodic boundary conditions use negative indices for values to the left (below 0), which are
6040: obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
6041: etc., to obtain values obtained by wrapping the values from the left edge. This does not work for anything but the
6042: DM_BOUNDARY_PERIODIC boundary type.
6044: For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
6045: a single value per point) you can skip filling those indices.
6047: Level: intermediate
6049: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6050: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRows()
6051: @*/
6052: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
6053: {
6054: PetscInt dim = mat->stencil.dim;
6055: PetscInt sdim = dim - (1 - (PetscInt) mat->stencil.noc);
6056: PetscInt *dims = mat->stencil.dims+1;
6057: PetscInt *starts = mat->stencil.starts;
6058: PetscInt *dxm = (PetscInt*) rows;
6059: PetscInt *jdxm, i, j, tmp, numNewRows = 0;
6065: PetscMalloc1(numRows, &jdxm);
6066: for (i = 0; i < numRows; ++i) {
6067: /* Skip unused dimensions (they are ordered k, j, i, c) */
6068: for (j = 0; j < 3-sdim; ++j) dxm++;
6069: /* Local index in X dir */
6070: tmp = *dxm++ - starts[0];
6071: /* Loop over remaining dimensions */
6072: for (j = 0; j < dim-1; ++j) {
6073: /* If nonlocal, set index to be negative */
6074: if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
6075: /* Update local index */
6076: else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
6077: }
6078: /* Skip component slot if necessary */
6079: if (mat->stencil.noc) dxm++;
6080: /* Local row number */
6081: if (tmp >= 0) {
6082: jdxm[numNewRows++] = tmp;
6083: }
6084: }
6085: MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6086: PetscFree(jdxm);
6087: return 0;
6088: }
6090: /*@C
6091: MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6092: of a set of rows of a matrix; using local numbering of rows.
6094: Collective on Mat
6096: Input Parameters:
6097: + mat - the matrix
6098: . numRows - the number of rows to remove
6099: . rows - the local row indices
6100: . diag - value put in all diagonals of eliminated rows
6101: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6102: - b - optional vector of right hand side, that will be adjusted by provided solution
6104: Notes:
6105: Before calling MatZeroRowsLocal(), the user must first set the
6106: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6108: For the AIJ matrix formats this removes the old nonzero structure,
6109: but does not release memory. For the dense and block diagonal
6110: formats this does not alter the nonzero structure.
6112: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6113: of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6114: merely zeroed.
6116: The user can set a value in the diagonal entry (or for the AIJ and
6117: row formats can optionally remove the main diagonal entry from the
6118: nonzero structure as well, by passing 0.0 as the final argument).
6120: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6121: owns that are to be zeroed. This saves a global synchronization in the implementation.
6123: Level: intermediate
6125: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRows(), MatSetOption(),
6126: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6127: @*/
6128: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6129: {
6135: MatCheckPreallocated(mat,1);
6137: if (mat->ops->zerorowslocal) {
6138: (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6139: } else {
6140: IS is, newis;
6141: const PetscInt *newRows;
6144: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6145: ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6146: ISGetIndices(newis,&newRows);
6147: (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6148: ISRestoreIndices(newis,&newRows);
6149: ISDestroy(&newis);
6150: ISDestroy(&is);
6151: }
6152: PetscObjectStateIncrease((PetscObject)mat);
6153: return 0;
6154: }
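/*
   Illustrative usage sketch for MatZeroRowsLocal() (not part of PETSc; the matrix A and its local-to-global
   mapping, set with MatSetLocalToGlobalMapping(), are assumed to exist; the row list is hypothetical):

     PetscInt lrows[] = {0, 1};                // hypothetical local row numbers on this rank
     MatZeroRowsLocal(A,2,lrows,1.0,NULL,NULL);
*/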
6156: /*@
6157: MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6158: of a set of rows of a matrix; using local numbering of rows.
6160: Collective on Mat
6162: Input Parameters:
6163: + mat - the matrix
6164: . is - index set of rows to remove
6165: . diag - value put in all diagonals of eliminated rows
6166: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6167: - b - optional vector of right hand side, that will be adjusted by provided solution
6169: Notes:
6170: Before calling MatZeroRowsLocalIS(), the user must first set the
6171: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6173: For the AIJ matrix formats this removes the old nonzero structure,
6174: but does not release memory. For the dense and block diagonal
6175: formats this does not alter the nonzero structure.
6177: If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6178: of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6179: merely zeroed.
6181: The user can set a value in the diagonal entry (or for the AIJ and
6182: row formats can optionally remove the main diagonal entry from the
6183: nonzero structure as well, by passing 0.0 as the final argument).
6185: You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6186: owns that are to be zeroed. This saves a global synchronization in the implementation.
6188: Level: intermediate
6190: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6191: MatZeroRowsColumnsLocal(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6192: @*/
6193: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6194: {
6195: PetscInt numRows;
6196: const PetscInt *rows;
6203: MatCheckPreallocated(mat,1);
6205: ISGetLocalSize(is,&numRows);
6206: ISGetIndices(is,&rows);
6207: MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6208: ISRestoreIndices(is,&rows);
6209: return 0;
6210: }
6212: /*@
6213: MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6214: of a set of rows and columns of a matrix; using local numbering of rows.
6216: Collective on Mat
6218: Input Parameters:
6219: + mat - the matrix
6220: . numRows - the number of rows to remove
6221: . rows - the global row indices
6222: . diag - value put in all diagonals of eliminated rows
6223: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6224: - b - optional vector of right hand side, that will be adjusted by provided solution
6226: Notes:
6227: Before calling MatZeroRowsColumnsLocal(), the user must first set the
6228: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6230: The user can set a value in the diagonal entry (or for the AIJ and
6231: row formats can optionally remove the main diagonal entry from the
6232: nonzero structure as well, by passing 0.0 as the final argument).
6234: Level: intermediate
6236: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6237: MatZeroRows(), MatZeroRowsColumnsLocalIS(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6238: @*/
6239: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6240: {
6241: IS is, newis;
6242: const PetscInt *newRows;
6249: MatCheckPreallocated(mat,1);
6252: ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6253: ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6254: ISGetIndices(newis,&newRows);
6255: (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6256: ISRestoreIndices(newis,&newRows);
6257: ISDestroy(&newis);
6258: ISDestroy(&is);
6259: PetscObjectStateIncrease((PetscObject)mat);
6260: return 0;
6261: }
6263: /*@
6264: MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6265: of a set of rows and columns of a matrix; using local numbering of rows.
6267: Collective on Mat
6269: Input Parameters:
6270: + mat - the matrix
6271: . is - index set of rows to remove
6272: . diag - value put in all diagonals of eliminated rows
6273: . x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6274: - b - optional vector of right hand side, that will be adjusted by provided solution
6276: Notes:
6277: Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6278: local-to-global mapping by calling MatSetLocalToGlobalMapping().
6280: The user can set a value in the diagonal entry (or for the AIJ and
6281: row formats can optionally remove the main diagonal entry from the
6282: nonzero structure as well, by passing 0.0 as the final argument).
6284: Level: intermediate
6286: .seealso: MatZeroRowsIS(), MatZeroRowsColumns(), MatZeroRowsLocalIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(),
6287: MatZeroRowsColumnsLocal(), MatZeroRows(), MatZeroRowsColumnsIS(), MatZeroRowsColumnsStencil()
6288: @*/
6289: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6290: {
6291: PetscInt numRows;
6292: const PetscInt *rows;
6299: MatCheckPreallocated(mat,1);
6301: ISGetLocalSize(is,&numRows);
6302: ISGetIndices(is,&rows);
6303: MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6304: ISRestoreIndices(is,&rows);
6305: return 0;
6306: }
6308: /*@C
6309: MatGetSize - Returns the numbers of rows and columns in a matrix.
6311: Not Collective
6313: Input Parameter:
6314: . mat - the matrix
6316: Output Parameters:
6317: + m - the number of global rows
6318: - n - the number of global columns
6320: Note: both output parameters can be NULL on input.
6322: Level: beginner
6324: .seealso: MatGetLocalSize()
6325: @*/
6326: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6327: {
6329: if (m) *m = mat->rmap->N;
6330: if (n) *n = mat->cmap->N;
6331: return 0;
6332: }
6334: /*@C
6335: MatGetLocalSize - Returns the number of local rows and local columns
6336: of a matrix, that is the local size of the left and right vectors as returned by MatCreateVecs().
6338: Not Collective
6340: Input Parameter:
6341: . mat - the matrix
6343: Output Parameters:
6344: + m - the number of local rows
6345: - n - the number of local columns
6347: Note: both output parameters can be NULL on input.
6349: Level: beginner
6351: .seealso: MatGetSize()
6352: @*/
6353: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6354: {
6358: if (m) *m = mat->rmap->n;
6359: if (n) *n = mat->cmap->n;
6360: return 0;
6361: }
6363: /*@C
6364: MatGetOwnershipRangeColumn - Returns the range of matrix columns associated with the rows of a vector one multiplies the matrix by
6365: that are owned by this processor. (The columns of the "diagonal block")
6367: Not Collective, unless matrix has not been allocated, then collective on Mat
6369: Input Parameter:
6370: . mat - the matrix
6372: Output Parameters:
6373: + m - the global index of the first local column
6374: - n - one more than the global index of the last local column
6376: Notes:
6377: both output parameters can be NULL on input.
6379: Level: developer
6381: .seealso: MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()
6383: @*/
6384: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6385: {
6390: MatCheckPreallocated(mat,1);
6391: if (m) *m = mat->cmap->rstart;
6392: if (n) *n = mat->cmap->rend;
6393: return 0;
6394: }
6396: /*@C
6397: MatGetOwnershipRange - Returns the range of matrix rows owned by
6398: this processor, assuming that the matrix is laid out with the first
6399: n1 rows on the first processor, the next n2 rows on the second, etc.
6400: For certain parallel layouts this range may not be well defined.
6402: Not Collective
6404: Input Parameter:
6405: . mat - the matrix
6407: Output Parameters:
6408: + m - the global index of the first local row
6409: - n - one more than the global index of the last local row
6411: Note: Both output parameters can be NULL on input.
6412: $ This function requires that the matrix be preallocated. If you have not preallocated, consider using
6413: $ PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6414: $ and then MPI_Scan() to calculate prefix sums of the local sizes.
6416: Level: beginner
6418: .seealso: MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()
6420: @*/
6421: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6422: {
6427: MatCheckPreallocated(mat,1);
6428: if (m) *m = mat->rmap->rstart;
6429: if (n) *n = mat->rmap->rend;
6430: return 0;
6431: }
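/*
   Illustrative usage sketch for MatGetOwnershipRange() (not part of PETSc; A is an assumed, preallocated square
   matrix and error checking is omitted). Each rank inserts only into rows it owns, here setting the identity:

     PetscInt    row,rstart,rend;
     PetscScalar one = 1.0;
     MatGetOwnershipRange(A,&rstart,&rend);
     for (row=rstart; row<rend; row++) {
       MatSetValues(A,1,&row,1,&row,&one,INSERT_VALUES);
     }
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/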
6433: /*@C
6434: MatGetOwnershipRanges - Returns the range of matrix rows owned by
6435: each process
6437: Not Collective, unless matrix has not been allocated, then collective on Mat
6439: Input Parameters:
6440: . mat - the matrix
6442: Output Parameters:
6443: . ranges - start of each processor's portion plus one more than the total length at the end
6445: Level: beginner
6447: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()
6449: @*/
6450: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6451: {
6454: MatCheckPreallocated(mat,1);
6455: PetscLayoutGetRanges(mat->rmap,ranges);
6456: return 0;
6457: }
6459: /*@C
6460: MatGetOwnershipRangesColumn - Returns the ranges of matrix columns associated with the rows of a vector one multiplies the matrix by
6461: that are owned by each process. (The columns of the "diagonal blocks" for each process)
6463: Not Collective, unless matrix has not been allocated, then collective on Mat
6465: Input Parameters:
6466: . mat - the matrix
6468: Output Parameters:
6469: . ranges - start of each processor's portion plus one more than the total length at the end
6471: Level: beginner
6473: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()
6475: @*/
6476: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6477: {
6480: MatCheckPreallocated(mat,1);
6481: PetscLayoutGetRanges(mat->cmap,ranges);
6482: return 0;
6483: }
6485: /*@C
6486: MatGetOwnershipIS - Get row and column ownership as index sets
6488: Not Collective
6490: Input Parameter:
6491: . A - matrix
6493: Output Parameters:
6494: + rows - rows in which this process owns elements
6495: - cols - columns in which this process owns elements
6497: Level: intermediate
6499: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL, MATSCALAPACK
6500: @*/
6501: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6502: {
6503: PetscErrorCode (*f)(Mat,IS*,IS*);
6505: MatCheckPreallocated(A,1);
6506: PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6507: if (f) {
6508: (*f)(A,rows,cols);
6509: } else { /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6510: if (rows) ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);
6511: if (cols) ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);
6512: }
6513: return 0;
6514: }
6516: /*@C
6517: MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6518: Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6519: to complete the factorization.
6521: Collective on Mat
6523: Input Parameters:
6524: + mat - the matrix
6525: . row - row permutation
6526: . column - column permutation
6527: - info - structure containing
6528: $ levels - number of levels of fill.
6529: $ expected fill - as ratio of original fill.
6530: $ 1 or 0 - indicating force fill on diagonal (improves robustness for matrices
6531: missing diagonal entries)
6533: Output Parameters:
6534: . fact - new matrix that has been symbolically factored
6536: Notes:
6537: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.
6539: Most users should employ the simplified KSP interface for linear solvers
6540: instead of working directly with matrix algebra routines such as this.
6541: See, e.g., KSPCreate().
6543: Level: developer
6545: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6546: MatGetOrdering(), MatFactorInfo
6548: Note: this uses the definition of level of fill as in Y. Saad, 2003
6550: Developer Note: fortran interface is not autogenerated as the f90
6551: interface definition cannot be generated correctly [due to MatFactorInfo]
6553: References:
6554: . * - Y. Saad, Iterative methods for sparse linear systems Philadelphia: Society for Industrial and Applied Mathematics, 2003
6555: @*/
6556: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6557: {
6566: if (!fact->ops->ilufactorsymbolic) {
6567: MatSolverType stype;
6568: MatFactorGetSolverType(fact,&stype);
6569: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver type %s",((PetscObject)mat)->type_name,stype);
6570: }
6573: MatCheckPreallocated(mat,2);
6575: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6576: (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6577: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6578: return 0;
6579: }
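/*
   Illustrative usage sketch of the low-level ILU interface (not part of PETSc; A is an assumed sequential AIJ
   matrix, since the built-in PETSc ILU is sequential, b and x are assumed vectors, and error checking is
   omitted). Most users should let KSP/PC drive this sequence instead:

     Mat           F;
     IS            row,col;
     MatFactorInfo info;
     MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,&F);
     MatFactorInfoInitialize(&info);
     info.levels = 1;                                  // ILU(1)
     MatILUFactorSymbolic(F,A,row,col,&info);
     MatLUFactorNumeric(F,A,&info);
     MatSolve(F,b,x);
     ISDestroy(&row); ISDestroy(&col); MatDestroy(&F);
*/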
6581: /*@C
6582: MatICCFactorSymbolic - Performs symbolic incomplete
6583: Cholesky factorization for a symmetric matrix. Use
6584: MatCholeskyFactorNumeric() to complete the factorization.
6586: Collective on Mat
6588: Input Parameters:
6589: + mat - the matrix
6590: . perm - row and column permutation
6591: - info - structure containing
6592: $ levels - number of levels of fill.
6593: $ expected fill - as ratio of original fill.
6595: Output Parameter:
6596: . fact - the factored matrix
6598: Notes:
6599: Most users should employ the KSP interface for linear solvers
6600: instead of working directly with matrix algebra routines such as this.
6601: See, e.g., KSPCreate().
6603: Level: developer
6605: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo
6607: Note: this uses the definition of level of fill as in Y. Saad, 2003
6609: Developer Note: fortran interface is not autogenerated as the f90
6610: interface definition cannot be generated correctly [due to MatFactorInfo]
6612: References:
6613: . * - Y. Saad, Iterative methods for sparse linear systems Philadelphia: Society for Industrial and Applied Mathematics, 2003
6614: @*/
6615: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6616: {
6625: if (!(fact)->ops->iccfactorsymbolic) {
6626: MatSolverType stype;
6627: MatFactorGetSolverType(fact,&stype);
6628: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver type %s",((PetscObject)mat)->type_name,stype);
6629: }
6631: MatCheckPreallocated(mat,2);
6633: if (!fact->trivialsymbolic) PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6634: (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6635: if (!fact->trivialsymbolic) PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6636: return 0;
6637: }
6639: /*@C
6640: MatCreateSubMatrices - Extracts several submatrices from a matrix. If submat
6641: points to an array of valid matrices, they may be reused to store the new
6642: submatrices.
6644: Collective on Mat
6646: Input Parameters:
6647: + mat - the matrix
6648: . n - the number of submatrices to be extracted (on this processor, may be zero)
6649: . irow, icol - index sets of rows and columns to extract
6650: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6652: Output Parameter:
6653: . submat - the array of submatrices
6655: Notes:
6656: MatCreateSubMatrices() can extract ONLY sequential submatrices
6657: (from both sequential and parallel matrices). Use MatCreateSubMatrix()
6658: to extract a parallel submatrix.
6660: Some matrix types place restrictions on the row and column
6661: indices, such as that they be sorted or that they be equal to each other.
6663: The index sets may not have duplicate entries.
6665: When extracting submatrices from a parallel matrix, each processor can
6666: form a different submatrix by setting the rows and columns of its
6667: individual index sets according to the local submatrix desired.
6669: When finished using the submatrices, the user should destroy
6670: them with MatDestroySubMatrices().
6672: MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6673: original matrix has not changed from that last call to MatCreateSubMatrices().
6675: This routine creates the matrices in submat; you should NOT create them before
6676: calling it. It also allocates the array of matrix pointers submat.
6678: For BAIJ matrices the index sets must respect the block structure, that is if they
6679: request one row/column in a block, they must request all rows/columns that are in
6680: that block. For example, if the block size is 2 you cannot request just row 0 and
6681: column 0.
6683: Fortran Note:
6684: The Fortran interface is slightly different from that given below; it
6685: requires one to pass in as submat a Mat (integer) array of size at least n+1.
6687: Level: advanced
6689: .seealso: MatDestroySubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6690: @*/
6691: PetscErrorCode MatCreateSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6692: {
6693: PetscInt i;
6694: PetscBool eq;
6698: if (n) {
6703: }
6705: if (n && scall == MAT_REUSE_MATRIX) {
6708: }
6712: MatCheckPreallocated(mat,1);
6714: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6715: (*mat->ops->createsubmatrices)(mat,n,irow,icol,scall,submat);
6716: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6717: for (i=0; i<n; i++) {
6718: (*submat)[i]->factortype = MAT_FACTOR_NONE; /* in case in place factorization was previously done on submatrix */
6719: ISEqualUnsorted(irow[i],icol[i],&eq);
6720: if (eq) {
6721: MatPropagateSymmetryOptions(mat,(*submat)[i]);
6722: }
6723: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
6724: if (mat->boundtocpu && mat->bindingpropagates) {
6725: MatBindToCPU((*submat)[i],PETSC_TRUE);
6726: MatSetBindingPropagates((*submat)[i],PETSC_TRUE);
6727: }
6728: #endif
6729: }
6730: return 0;
6731: }
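/*
   Illustrative usage sketch for MatCreateSubMatrices() (not part of PETSc; A is an assumed parallel matrix and
   the index ranges are hypothetical). Each rank extracts one sequential submatrix made of its first 10 locally
   owned rows and the first 10 global columns:

     IS       isrow,iscol;
     Mat      *submats;
     PetscInt rstart,rend;
     MatGetOwnershipRange(A,&rstart,&rend);
     ISCreateStride(PETSC_COMM_SELF,10,rstart,1,&isrow);   // first 10 locally owned rows (assumed to exist)
     ISCreateStride(PETSC_COMM_SELF,10,0,1,&iscol);        // first 10 global columns
     MatCreateSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submats);
     // ... use submats[0] ...
     MatDestroySubMatrices(1,&submats);
     ISDestroy(&isrow); ISDestroy(&iscol);
*/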
6733: /*@C
6734: MatCreateSubMatricesMPI - Extracts MPI submatrices across a sub communicator of mat (by pairs of IS that may live on subcomms).
6736: Collective on Mat
6738: Input Parameters:
6739: + mat - the matrix
6740: . n - the number of submatrices to be extracted
6741: . irow, icol - index sets of rows and columns to extract
6742: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
6744: Output Parameter:
6745: . submat - the array of submatrices
6747: Level: advanced
6749: .seealso: MatCreateSubMatrices(), MatCreateSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6750: @*/
6751: PetscErrorCode MatCreateSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6752: {
6753: PetscInt i;
6754: PetscBool eq;
6758: if (n) {
6763: }
6765: if (n && scall == MAT_REUSE_MATRIX) {
6768: }
6772: MatCheckPreallocated(mat,1);
6774: PetscLogEventBegin(MAT_CreateSubMats,mat,0,0,0);
6775: (*mat->ops->createsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6776: PetscLogEventEnd(MAT_CreateSubMats,mat,0,0,0);
6777: for (i=0; i<n; i++) {
6778: ISEqualUnsorted(irow[i],icol[i],&eq);
6779: if (eq) {
6780: MatPropagateSymmetryOptions(mat,(*submat)[i]);
6781: }
6782: }
6783: return 0;
6784: }
6786: /*@C
6787: MatDestroyMatrices - Destroys an array of matrices.
6789: Collective on Mat
6791: Input Parameters:
6792: + n - the number of local matrices
6793: - mat - the matrices (note that this is a pointer to the array of matrices)
6795: Level: advanced
6797: Notes:
6798: Frees not only the matrices, but also the array that contains the matrices
6799: In Fortran will not free the array.
6801: .seealso: MatCreateSubMatrices() MatDestroySubMatrices()
6802: @*/
6803: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6804: {
6805: PetscInt i;
6807: if (!*mat) return 0;
6811: for (i=0; i<n; i++) {
6812: MatDestroy(&(*mat)[i]);
6813: }
6815: /* memory is allocated even if n = 0 */
6816: PetscFree(*mat);
6817: return 0;
6818: }
6820: /*@C
6821: MatDestroySubMatrices - Destroys a set of matrices obtained with MatCreateSubMatrices().
6823: Collective on Mat
6825: Input Parameters:
6826: + n - the number of local matrices
6827: - mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6828: sequence of MatCreateSubMatrices())
6830: Level: advanced
6832: Notes:
6833: Frees not only the matrices, but also the array that contains the matrices
6834: In Fortran will not free the array.
6836: .seealso: MatCreateSubMatrices()
6837: @*/
6838: PetscErrorCode MatDestroySubMatrices(PetscInt n,Mat *mat[])
6839: {
6840: Mat mat0;
6842: if (!*mat) return 0;
6843: /* mat[] is an array of length n+1, see MatCreateSubMatrices_xxx() */
6847: mat0 = (*mat)[0];
6848: if (mat0 && mat0->ops->destroysubmatrices) {
6849: (mat0->ops->destroysubmatrices)(n,mat);
6850: } else {
6851: MatDestroyMatrices(n,mat);
6852: }
6853: return 0;
6854: }
6856: /*@C
6857: MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.
6859: Collective on Mat
6861: Input Parameters:
6862: . mat - the matrix
6864: Output Parameter:
6865: . matstruct - the sequential matrix with the nonzero structure of mat
6867: Level: intermediate
6869: .seealso: MatDestroySeqNonzeroStructure(), MatCreateSubMatrices(), MatDestroyMatrices()
6870: @*/
6871: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6872: {
6878: MatCheckPreallocated(mat,1);
6881: PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6882: (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6883: PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6884: return 0;
6885: }
6887: /*@C
6888: MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().
6890: Collective on Mat
6892: Input Parameters:
6893: . mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6894: sequence of MatGetSeqNonzeroStructure())
6896: Level: advanced
6898: Notes:
6899: Frees the matrix obtained with MatGetSeqNonzeroStructure()
6901: .seealso: MatGetSeqNonzeroStructure()
6902: @*/
6903: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6904: {
6906: MatDestroy(mat);
6907: return 0;
6908: }
6910: /*@
6911: MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6912: replaces the index sets by larger ones that represent submatrices with
6913: additional overlap.
6915: Collective on Mat
6917: Input Parameters:
6918: + mat - the matrix
6919: . n - the number of index sets
6920: . is - the array of index sets (these index sets will be changed during the call)
6921: - ov - the additional overlap requested
6923: Options Database:
6924: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
6926: Level: developer
6928: Developer Note:
6929: Any implementation must preserve block sizes. That is: if the row block size and the column block size of mat are equal to bs, then the output index sets must be compatible with bs.
6931: .seealso: MatCreateSubMatrices()
6932: @*/
6933: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6934: {
6935: PetscInt i,bs,cbs;
6940: if (n) {
6944: }
6947: MatCheckPreallocated(mat,1);
6949: if (!ov) return 0;
6951: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6952: (*mat->ops->increaseoverlap)(mat,n,is,ov);
6953: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6954: MatGetBlockSizes(mat,&bs,&cbs);
6955: if (bs == cbs) {
6956: for (i=0; i<n; i++) {
6957: ISSetBlockSize(is[i],bs);
6958: }
6959: }
6960: return 0;
6961: }
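/*
   Illustrative usage sketch for MatIncreaseOverlap() (not part of PETSc; A is an assumed, assembled parallel
   matrix and error checking is omitted). Starting from the locally owned rows, grow the index set by one level
   of overlap and extract the corresponding sequential submatrix, as an additive-Schwarz style setup would do:

     IS       is;
     Mat      *submats;
     PetscInt rstart,rend;
     MatGetOwnershipRange(A,&rstart,&rend);
     ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&is);   // rows owned by this rank
     MatIncreaseOverlap(A,1,&is,1);                              // add one level of overlap
     MatCreateSubMatrices(A,1,&is,&is,MAT_INITIAL_MATRIX,&submats);
     // ... use submats[0] as the local subdomain matrix ...
     MatDestroySubMatrices(1,&submats);
     ISDestroy(&is);
*/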
6963: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);
6965: /*@
6966: MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
6967: a sub communicator, replaces the index sets by larger ones that represent submatrices with
6968: additional overlap.
6970: Collective on Mat
6972: Input Parameters:
6973: + mat - the matrix
6974: . n - the number of index sets
6975: . is - the array of index sets (these index sets will be changed during the call)
6976: - ov - the additional overlap requested
6978: Options Database:
6979: . -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)
6981: Level: developer
6983: .seealso: MatCreateSubMatrices()
6984: @*/
6985: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
6986: {
6987: PetscInt i;
6992: if (n) {
6995: }
6998: MatCheckPreallocated(mat,1);
6999: if (!ov) return 0;
7000: PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
7001: for (i=0; i<n; i++) {
7002: MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
7003: }
7004: PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
7005: return 0;
7006: }
7008: /*@
7009: MatGetBlockSize - Returns the matrix block size.
7011: Not Collective
7013: Input Parameter:
7014: . mat - the matrix
7016: Output Parameter:
7017: . bs - block size
7019: Notes:
7020: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7022: If the block size has not been set yet this routine returns 1.
7024: Level: intermediate
7026: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7027: @*/
7028: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7029: {
7032: *bs = PetscAbs(mat->rmap->bs);
7033: return 0;
7034: }
7036: /*@
7037: MatGetBlockSizes - Returns the matrix block row and column sizes.
7039: Not Collective
7041: Input Parameter:
7042: . mat - the matrix
7044: Output Parameters:
7045: + rbs - row block size
7046: - cbs - column block size
7048: Notes:
7049: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7050: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7052: If a block size has not been set yet this routine returns 1.
7054: Level: intermediate
7056: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7057: @*/
7058: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7059: {
7063: if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7064: if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7065: return 0;
7066: }
7068: /*@
7069: MatSetBlockSize - Sets the matrix block size.
7071: Logically Collective on Mat
7073: Input Parameters:
7074: + mat - the matrix
7075: - bs - block size
7077: Notes:
7078: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7079: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.
7081: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block size
7082: is compatible with the matrix local sizes.
7084: Level: intermediate
7086: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7087: @*/
7088: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7089: {
7092: MatSetBlockSizes(mat,bs,bs);
7093: return 0;
7094: }
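/*
   Illustrative sketch of where MatSetBlockSize() fits in the matrix setup sequence (not part of PETSc; the
   global size is hypothetical and error checking is omitted). The block size is set before preallocation or
   MatSetUp() and the global size must be divisible by it:

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
     MatSetType(A,MATBAIJ);
     MatSetBlockSize(A,2);                     // 2x2 blocks
     MatSetUp(A);
*/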
7096: /*@
7097: MatSetVariableBlockSizes - Sets diagonal point-blocks of the matrix that need not be of the same size
7099: Logically Collective on Mat
7101: Input Parameters:
7102: + mat - the matrix
7103: . nblocks - the number of blocks on this process
7104: - bsizes - the block sizes
7106: Notes:
7107: Currently used by PCVPBJACOBI for AIJ matrices
7109: Each variable point-block set of degrees of freedom must live on a single MPI rank. That is, a point block cannot straddle two MPI ranks.
7111: Level: intermediate
7113: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes(), MatGetVariableBlockSizes(), PCVPBJACOBI
7114: @*/
7115: PetscErrorCode MatSetVariableBlockSizes(Mat mat,PetscInt nblocks,PetscInt *bsizes)
7116: {
7117: PetscInt i,ncnt = 0, nlocal;
7121: MatGetLocalSize(mat,&nlocal,NULL);
7122: for (i=0; i<nblocks; i++) ncnt += bsizes[i];
7124: PetscFree(mat->bsizes);
7125: mat->nblocks = nblocks;
7126: PetscMalloc1(nblocks,&mat->bsizes);
7127: PetscArraycpy(mat->bsizes,bsizes,nblocks);
7128: return 0;
7129: }
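/*
   Illustrative usage sketch for MatSetVariableBlockSizes() (not part of PETSc; A is an assumed AIJ matrix whose
   local row count equals the sum of the block sizes, here 2+3+1 = 6 local rows), e.g. in preparation for
   PCVPBJACOBI:

     PetscInt bsizes[] = {2, 3, 1};
     MatSetVariableBlockSizes(A,3,bsizes);
*/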
7131: /*@C
7132: MatGetVariableBlockSizes - Gets the diagonal point-blocks of the matrix that need not be of the same size
7134: Logically Collective on Mat
7136: Input Parameter:
7137: . mat - the matrix
7139: Output Parameters:
7140: + nblocks - the number of blocks on this process
7141: - bsizes - the block sizes
7143: Notes: Currently not supported from Fortran
7145: Level: intermediate
7147: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes(), MatSetVariableBlockSizes()
7148: @*/
7149: PetscErrorCode MatGetVariableBlockSizes(Mat mat,PetscInt *nblocks,const PetscInt **bsizes)
7150: {
7152: *nblocks = mat->nblocks;
7153: *bsizes = mat->bsizes;
7154: return 0;
7155: }
7157: /*@
7158: MatSetBlockSizes - Sets the matrix block row and column sizes.
7160: Logically Collective on Mat
7162: Input Parameters:
7163: + mat - the matrix
7164: . rbs - row block size
7165: - cbs - column block size
7167: Notes:
7168: Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7169: If you pass a different block size for the columns than the rows, the row block size determines the square block storage.
7170: This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later.
7172: For MATMPIAIJ and MATSEQAIJ matrix formats, this function can be called at a later stage, provided that the specified block sizes
7173: are compatible with the matrix local sizes.
7175: The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().
7177: Level: intermediate
7179: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7180: @*/
7181: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7182: {
7186: if (mat->ops->setblocksizes) {
7187: (*mat->ops->setblocksizes)(mat,rbs,cbs);
7188: }
7189: if (mat->rmap->refcnt) {
7190: ISLocalToGlobalMapping l2g = NULL;
7191: PetscLayout nmap = NULL;
7193: PetscLayoutDuplicate(mat->rmap,&nmap);
7194: if (mat->rmap->mapping) {
7195: ISLocalToGlobalMappingDuplicate(mat->rmap->mapping,&l2g);
7196: }
7197: PetscLayoutDestroy(&mat->rmap);
7198: mat->rmap = nmap;
7199: mat->rmap->mapping = l2g;
7200: }
7201: if (mat->cmap->refcnt) {
7202: ISLocalToGlobalMapping l2g = NULL;
7203: PetscLayout nmap = NULL;
7205: PetscLayoutDuplicate(mat->cmap,&nmap);
7206: if (mat->cmap->mapping) {
7207: ISLocalToGlobalMappingDuplicate(mat->cmap->mapping,&l2g);
7208: }
7209: PetscLayoutDestroy(&mat->cmap);
7210: mat->cmap = nmap;
7211: mat->cmap->mapping = l2g;
7212: }
7213: PetscLayoutSetBlockSize(mat->rmap,rbs);
7214: PetscLayoutSetBlockSize(mat->cmap,cbs);
7215: return 0;
7216: }
7218: /*@
7219: MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices
7221: Logically Collective on Mat
7223: Input Parameters:
7224: + mat - the matrix
7225: . fromRow - matrix from which to copy row block size
7226: - fromCol - matrix from which to copy column block size (can be same as fromRow)
7228: Level: developer
7230: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7231: @*/
7232: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7233: {
7237: if (fromRow->rmap->bs > 0) PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);
7238: if (fromCol->cmap->bs > 0) PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);
7239: return 0;
7240: }
7242: /*@
7243: MatResidual - Default routine to calculate the residual.
7245: Collective on Mat
7247: Input Parameters:
7248: + mat - the matrix
7249: . b - the right-hand-side
7250: - x - the approximate solution
7252: Output Parameter:
7253: . r - location to store the residual
7255: Level: developer
7257: .seealso: PCMGSetResidual()
7258: @*/
7259: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7260: {
7266: MatCheckPreallocated(mat,1);
7267: PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7268: if (!mat->ops->residual) {
7269: MatMult(mat,x,r);
7270: VecAYPX(r,-1.0,b);
7271: } else {
7272: (*mat->ops->residual)(mat,b,x,r);
7273: }
7274: PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7275: return 0;
7276: }
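/*
   A minimal sketch of computing a residual norm with MatResidual() (the objects A, x and b are
   illustrative assumptions, created and filled elsewhere).

.vb
     Vec       r;
     PetscReal rnorm;

     VecDuplicate(b,&r);
     MatResidual(A,b,x,r);                 // r = b - A*x
     VecNorm(r,NORM_2,&rnorm);
     VecDestroy(&r);
.ve
*/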
7278: /*@C
7279: MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.
7281: Collective on Mat
7283: Input Parameters:
7284: + mat - the matrix
7285: . shift - 0 or 1 indicating we want the indices starting at 0 or 1
7286: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7287: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7288: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7289: always used.
7291: Output Parameters:
7292: + n - number of rows in the (possibly compressed) matrix
7293: . ia - the row pointers; that is ia[0] = 0, ia[row] = ia[row-1] + number of elements in that row of the matrix
7294: . ja - the column indices
7295: - done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7296: are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set
7298: Level: developer
7300: Notes:
7301: You CANNOT change any of the ia[] or ja[] values.
7303: Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values.
7305: Fortran Notes:
7306: In Fortran use
7307: $
7308: $ PetscInt ia(1), ja(1)
7309: $ PetscOffset iia, jja
7310: $ call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7311: $ ! Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7313: or
7314: $
7315: $ PetscInt, pointer :: ia(:),ja(:)
7316: $ call MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7317: $ ! Access the ith and jth entries via ia(i) and ja(j)
7319: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7320: @*/
7321: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7322: {
7329: MatCheckPreallocated(mat,1);
7330: if (!mat->ops->getrowij) *done = PETSC_FALSE;
7331: else {
7332: *done = PETSC_TRUE;
7333: PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7334: (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7335: PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7336: }
7337: return 0;
7338: }
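/*
   A sketch of traversing the compressed row structure of a sequential AIJ matrix with MatGetRowIJ()
   and MatRestoreRowIJ() (the matrix A is an illustrative assumption; always check the done flag).

.vb
     PetscInt       n,row,k;
     const PetscInt *ia,*ja;
     PetscBool      done;

     MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
     if (done) {
       for (row=0; row<n; row++) {
         for (k=ia[row]; k<ia[row+1]; k++) {
           // ja[k] is a column index of row 'row'
         }
       }
     }
     MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);
.ve
*/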
7340: /*@C
7341: MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.
7343: Collective on Mat
7345: Input Parameters:
7346: + mat - the matrix
7347: .  shift - 0 or 1 indicating we want the indices starting at 0 or 1
7348: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7349: symmetrized
7350: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7351: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7352: always used.
7353: . n - number of columns in the (possibly compressed) matrix
7354: .  ia - the column pointers; that is ia[0] = 0, ia[col] = ia[col-1] + number of elements in that col of the matrix
7355: - ja - the row indices
7357: Output Parameters:
7358: . done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned
7360: Level: developer
7362: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7363: @*/
7364: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7365: {
7372: MatCheckPreallocated(mat,1);
7373: if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7374: else {
7375: *done = PETSC_TRUE;
7376: (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7377: }
7378: return 0;
7379: }
7381: /*@C
7382: MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7383: MatGetRowIJ().
7385: Collective on Mat
7387: Input Parameters:
7388: + mat - the matrix
7389: .  shift - 0 or 1 indicating we want the indices starting at 0 or 1
7390: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7391: symmetrized
7392: . inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7393: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7394: always used.
7395: . n - size of (possibly compressed) matrix
7396: . ia - the row pointers
7397: - ja - the column indices
7399: Output Parameters:
7400: .  done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7402: Note:
7403: This routine zeros out n, ia, and ja. This is to prevent accidental
7404:     use of the arrays after they have been restored. If you pass NULL, it will
7405: not zero the pointers. Use of ia or ja after MatRestoreRowIJ() is invalid.
7407: Level: developer
7409: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7410: @*/
7411: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7412: {
7418: MatCheckPreallocated(mat,1);
7420: if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7421: else {
7422: *done = PETSC_TRUE;
7423: (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7424: if (n) *n = 0;
7425: if (ia) *ia = NULL;
7426: if (ja) *ja = NULL;
7427: }
7428: return 0;
7429: }
7431: /*@C
7432: MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7433: MatGetColumnIJ().
7435: Collective on Mat
7437: Input Parameters:
7438: + mat - the matrix
7439: .  shift - 0 or 1 indicating we want the indices starting at 0 or 1
7440: . symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7441: symmetrized
7442: - inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7443: inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7444: always used.
7446: Output Parameters:
7447: + n - size of (possibly compressed) matrix
7448: . ia - the column pointers
7449: . ja - the row indices
7450: -   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned
7452: Level: developer
7454: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7455: @*/
7456: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool *done)
7457: {
7463: MatCheckPreallocated(mat,1);
7465: if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7466: else {
7467: *done = PETSC_TRUE;
7468: (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7469: if (n) *n = 0;
7470: if (ia) *ia = NULL;
7471: if (ja) *ja = NULL;
7472: }
7473: return 0;
7474: }
7476: /*@C
7477:    MatColoringPatch - Used inside matrix coloring routines that
7478: use MatGetRowIJ() and/or MatGetColumnIJ().
7480: Collective on Mat
7482: Input Parameters:
7483: + mat - the matrix
7484: . ncolors - max color value
7485: . n - number of entries in colorarray
7486: - colorarray - array indicating color for each column
7488: Output Parameters:
7489: . iscoloring - coloring generated using colorarray information
7491: Level: developer
7493: .seealso: MatGetRowIJ(), MatGetColumnIJ()
7495: @*/
7496: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7497: {
7502: MatCheckPreallocated(mat,1);
7504: if (!mat->ops->coloringpatch) {
7505: ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7506: } else {
7507: (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7508: }
7509: return 0;
7510: }
7512: /*@
7513: MatSetUnfactored - Resets a factored matrix to be treated as unfactored.
7515: Logically Collective on Mat
7517: Input Parameter:
7518: . mat - the factored matrix to be reset
7520: Notes:
7521: This routine should be used only with factored matrices formed by in-place
7522: factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7523: format). This option can save memory, for example, when solving nonlinear
7524: systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7525: ILU(0) preconditioner.
7527: Note that one can specify in-place ILU(0) factorization by calling
7528: .vb
7529:       PCSetType(pc,PCILU);
7530:       PCFactorSetUseInPlace(pc,PETSC_TRUE);
7531: .ve
7532: or by using the options -pc_type ilu -pc_factor_in_place
7534: In-place factorization ILU(0) can also be used as a local
7535: solver for the blocks within the block Jacobi or additive Schwarz
7536: methods (runtime option: -sub_pc_factor_in_place). See Users-Manual: ch_pc
7537: for details on setting local solver options.
7539: Most users should employ the simplified KSP interface for linear solvers
7540: instead of working directly with matrix algebra routines such as this.
7541: See, e.g., KSPCreate().
7543: Level: developer
7545: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()
7547: @*/
7548: PetscErrorCode MatSetUnfactored(Mat mat)
7549: {
7552: MatCheckPreallocated(mat,1);
7553: mat->factortype = MAT_FACTOR_NONE;
7554: if (!mat->ops->setunfactored) return 0;
7555: (*mat->ops->setunfactored)(mat);
7556: return 0;
7557: }
7559: /*MC
7560: MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.
7562: Synopsis:
7563: MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7565: Not collective
7567: Input Parameter:
7568: . x - matrix
7570: Output Parameters:
7571: + xx_v - the Fortran90 pointer to the array
7572: - ierr - error code
7574: Example of Usage:
7575: .vb
7576:       PetscScalar, pointer :: xx_v(:,:)
7577:       ....
7578:       call MatDenseGetArrayF90(x,xx_v,ierr)
7579:       a = xx_v(3,1)
7580: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7581: .ve
7583: Level: advanced
7585: .seealso: MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()
7587: M*/
7589: /*MC
7590: MatDenseRestoreArrayF90 - Restores a matrix array that has been
7591: accessed with MatDenseGetArrayF90().
7593: Synopsis:
7594: MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)
7596: Not collective
7598: Input Parameters:
7599: + x - matrix
7600: - xx_v - the Fortran90 pointer to the array
7602: Output Parameter:
7603: . ierr - error code
7605: Example of Usage:
7606: .vb
7607:       PetscScalar, pointer :: xx_v(:,:)
7608:       ....
7609:       call MatDenseGetArrayF90(x,xx_v,ierr)
7610:       a = xx_v(3,1)
7611: call MatDenseRestoreArrayF90(x,xx_v,ierr)
7612: .ve
7614: Level: advanced
7616: .seealso: MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()
7618: M*/
7620: /*MC
7621: MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.
7623: Synopsis:
7624: MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7626: Not collective
7628: Input Parameter:
7629: . x - matrix
7631: Output Parameters:
7632: + xx_v - the Fortran90 pointer to the array
7633: - ierr - error code
7635: Example of Usage:
7636: .vb
7637:       PetscScalar, pointer :: xx_v(:)
7638: ....
7639: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7640: a = xx_v(3)
7641: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7642: .ve
7644: Level: advanced
7646: .seealso: MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()
7648: M*/
7650: /*MC
7651: MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7652: accessed with MatSeqAIJGetArrayF90().
7654: Synopsis:
7655: MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)
7657: Not collective
7659: Input Parameters:
7660: + x - matrix
7661: - xx_v - the Fortran90 pointer to the array
7663: Output Parameter:
7664: . ierr - error code
7666: Example of Usage:
7667: .vb
7668:       PetscScalar, pointer :: xx_v(:)
7669: ....
7670: call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7671: a = xx_v(3)
7672: call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7673: .ve
7675: Level: advanced
7677: .seealso: MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()
7679: M*/
7681: /*@
7682: MatCreateSubMatrix - Gets a single submatrix on the same number of processors
7683: as the original matrix.
7685: Collective on Mat
7687: Input Parameters:
7688: + mat - the original matrix
7689: . isrow - parallel IS containing the rows this processor should obtain
7690: .  iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7691: - cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
7693: Output Parameter:
7694: . newmat - the new submatrix, of the same type as the old
7696: Level: advanced
7698: Notes:
7699: The submatrix will be able to be multiplied with vectors using the same layout as iscol.
7701: Some matrix types place restrictions on the row and column indices, such
7702: as that they be sorted or that they be equal to each other.
7704: The index sets may not have duplicate entries.
7706: The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7707: the MatCreateSubMatrix() routine will create the newmat for you. Any additional calls
7708: to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7709: will reuse the matrix generated the first time. You should call MatDestroy() on newmat when
7710: you are finished using it.
7712: The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7713: the input matrix.
7715: If iscol is NULL then all columns are obtained (not supported in Fortran).
7717: Example usage:
7718: Consider the following 8x8 matrix with 34 non-zero values, that is
7719: assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7720: proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7721: as follows:
7723: .vb
7724: 1 2 0 | 0 3 0 | 0 4
7725: Proc0 0 5 6 | 7 0 0 | 8 0
7726: 9 0 10 | 11 0 0 | 12 0
7727: -------------------------------------
7728: 13 0 14 | 15 16 17 | 0 0
7729: Proc1 0 18 0 | 19 20 21 | 0 0
7730: 0 0 0 | 22 23 0 | 24 0
7731: -------------------------------------
7732: Proc2 25 26 27 | 0 0 28 | 29 0
7733: 30 0 0 | 31 32 33 | 0 34
7734: .ve
7736: Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6]. The resulting submatrix is
7738: .vb
7739: 2 0 | 0 3 0 | 0
7740: Proc0 5 6 | 7 0 0 | 8
7741: -------------------------------
7742: Proc1 18 0 | 19 20 21 | 0
7743: -------------------------------
7744: Proc2 26 27 | 0 0 28 | 29
7745: 0 0 | 31 32 33 | 0
7746: .ve
7748: .seealso: MatCreateSubMatrices(), MatCreateSubMatricesMPI(), MatCreateSubMatrixVirtual(), MatSubMatrixVirtualUpdate()
7749: @*/
7750: PetscErrorCode MatCreateSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7751: {
7752: PetscMPIInt size;
7753: Mat *local;
7754: IS iscoltmp;
7755: PetscBool flg;
7766: MatCheckPreallocated(mat,1);
7767: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
7769: if (!iscol || isrow == iscol) {
7770: PetscBool stride;
7771: PetscMPIInt grabentirematrix = 0,grab;
7772: PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7773: if (stride) {
7774: PetscInt first,step,n,rstart,rend;
7775: ISStrideGetInfo(isrow,&first,&step);
7776: if (step == 1) {
7777: MatGetOwnershipRange(mat,&rstart,&rend);
7778: if (rstart == first) {
7779: ISGetLocalSize(isrow,&n);
7780: if (n == rend-rstart) {
7781: grabentirematrix = 1;
7782: }
7783: }
7784: }
7785: }
7786: MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7787: if (grab) {
7788: PetscInfo(mat,"Getting entire matrix as submatrix\n");
7789: if (cll == MAT_INITIAL_MATRIX) {
7790: *newmat = mat;
7791: PetscObjectReference((PetscObject)mat);
7792: }
7793: return 0;
7794: }
7795: }
7797: if (!iscol) {
7798: ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7799: } else {
7800: iscoltmp = iscol;
7801: }
7803: /* if original matrix is on just one processor then use submatrix generated */
7804: if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7805: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7806: goto setproperties;
7807: } else if (mat->ops->createsubmatrices && !mat->ops->createsubmatrix && size == 1) {
7808: MatCreateSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7809: *newmat = *local;
7810: PetscFree(local);
7811: goto setproperties;
7812: } else if (!mat->ops->createsubmatrix) {
7813: /* Create a new matrix type that implements the operation using the full matrix */
7814: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7815: switch (cll) {
7816: case MAT_INITIAL_MATRIX:
7817: MatCreateSubMatrixVirtual(mat,isrow,iscoltmp,newmat);
7818: break;
7819: case MAT_REUSE_MATRIX:
7820: MatSubMatrixVirtualUpdate(*newmat,mat,isrow,iscoltmp);
7821: break;
7822: default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7823: }
7824: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7825: goto setproperties;
7826: }
7829: PetscLogEventBegin(MAT_CreateSubMat,mat,0,0,0);
7830: (*mat->ops->createsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7831: PetscLogEventEnd(MAT_CreateSubMat,mat,0,0,0);
7833: setproperties:
7834: ISEqualUnsorted(isrow,iscoltmp,&flg);
7835: if (flg) {
7836: MatPropagateSymmetryOptions(mat,*newmat);
7837: }
7838: if (!iscol) ISDestroy(&iscoltmp);
7839: if (*newmat && cll == MAT_INITIAL_MATRIX) PetscObjectStateIncrease((PetscObject)*newmat);
7840: return 0;
7841: }
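/*
   A sketch of extracting a parallel submatrix with MatCreateSubMatrix() (the square matrix A and the
   choice of every other locally owned row/column are illustrative assumptions).

.vb
     IS       isrow,iscol;
     Mat      S;
     PetscInt rstart,rend;

     MatGetOwnershipRange(A,&rstart,&rend);
     ISCreateStride(PETSC_COMM_WORLD,(rend-rstart)/2,rstart,2,&isrow);   // every other local row
     ISCreateStride(PETSC_COMM_WORLD,(rend-rstart)/2,rstart,2,&iscol);   // matching "diagonal" columns
     MatCreateSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&S);
     // ... later, after changing values of A but not its nonzero structure ...
     MatCreateSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&S);
     MatDestroy(&S);
     ISDestroy(&isrow);
     ISDestroy(&iscol);
.ve
*/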
7843: /*@
7844: MatPropagateSymmetryOptions - Propagates symmetry options set on a matrix to another matrix
7846: Not Collective
7848: Input Parameters:
7849: + A - the matrix we wish to propagate options from
7850: - B - the matrix we wish to propagate options to
7852: Level: beginner
7854:    Notes: Propagates the options associated with MAT_SYMMETRY_ETERNAL, MAT_STRUCTURALLY_SYMMETRIC, MAT_HERMITIAN, MAT_SPD and MAT_SYMMETRIC
7856: .seealso: MatSetOption()
7857: @*/
7858: PetscErrorCode MatPropagateSymmetryOptions(Mat A, Mat B)
7859: {
7862: if (A->symmetric_eternal) { /* symmetric_eternal does not have a corresponding *set flag */
7863: MatSetOption(B,MAT_SYMMETRY_ETERNAL,A->symmetric_eternal);
7864: }
7865: if (A->structurally_symmetric_set) {
7866: MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,A->structurally_symmetric);
7867: }
7868: if (A->hermitian_set) {
7869: MatSetOption(B,MAT_HERMITIAN,A->hermitian);
7870: }
7871: if (A->spd_set) {
7872: MatSetOption(B,MAT_SPD,A->spd);
7873: }
7874: if (A->symmetric_set) {
7875: MatSetOption(B,MAT_SYMMETRIC,A->symmetric);
7876: }
7877: return 0;
7878: }
7880: /*@
7881: MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7882: used during the assembly process to store values that belong to
7883: other processors.
7885: Not Collective
7887: Input Parameters:
7888: + mat - the matrix
7889: . size - the initial size of the stash.
7890: - bsize - the initial size of the block-stash(if used).
7892: Options Database Keys:
7893: + -matstash_initial_size <size> or <size0,size1,...sizep-1>
7894: - -matstash_block_initial_size <bsize> or <bsize0,bsize1,...bsizep-1>
7896: Level: intermediate
7898: Notes:
7899: The block-stash is used for values set with MatSetValuesBlocked() while
7900: the stash is used for values set with MatSetValues()
7902: Run with the option -info and look for output of the form
7903: MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7904: to determine the appropriate value, MM, to use for size and
7905: MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7906:    to determine the appropriate value, BMM, to use for bsize
7908: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()
7910: @*/
7911: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7912: {
7915: MatStashSetInitialSize_Private(&mat->stash,size);
7916: MatStashSetInitialSize_Private(&mat->bstash,bsize);
7917: return 0;
7918: }
7920: /*@
7921: MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
7922: the matrix
7924: Neighbor-wise Collective on Mat
7926: Input Parameters:
7927: + mat - the matrix
7928: . x,y - the vectors
7929: - w - where the result is stored
7931: Level: intermediate
7933: Notes:
7934: w may be the same vector as y.
7936: This allows one to use either the restriction or interpolation (its transpose)
7937: matrix to do the interpolation
7939: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7941: @*/
7942: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7943: {
7944: PetscInt M,N,Ny;
7950: MatGetSize(A,&M,&N);
7951: VecGetSize(y,&Ny);
7952: if (M == Ny) {
7953: MatMultAdd(A,x,y,w);
7954: } else {
7955: MatMultTransposeAdd(A,x,y,w);
7956: }
7957: return 0;
7958: }
7960: /*@
7961: MatInterpolate - y = A*x or A'*x depending on the shape of
7962: the matrix
7964: Neighbor-wise Collective on Mat
7966: Input Parameters:
7967: + mat - the matrix
7968: - x,y - the vectors
7970: Level: intermediate
7972: Notes:
7973: This allows one to use either the restriction or interpolation (its transpose)
7974: matrix to do the interpolation
7976: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()
7978: @*/
7979: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
7980: {
7981: PetscInt M,N,Ny;
7986: MatGetSize(A,&M,&N);
7987: VecGetSize(y,&Ny);
7988: if (M == Ny) {
7989: MatMult(A,x,y);
7990: } else {
7991: MatMultTranspose(A,x,y);
7992: }
7993: return 0;
7994: }
7996: /*@
7997: MatRestrict - y = A*x or A'*x
7999: Neighbor-wise Collective on Mat
8001: Input Parameters:
8002: + mat - the matrix
8003: - x,y - the vectors
8005: Level: intermediate
8007: Notes:
8008: This allows one to use either the restriction or interpolation (its transpose)
8009: matrix to do the restriction
8011: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()
8013: @*/
8014: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
8015: {
8016: PetscInt M,N,Ny;
8021: MatGetSize(A,&M,&N);
8022: VecGetSize(y,&Ny);
8023: if (M == Ny) {
8024: MatMult(A,x,y);
8025: } else {
8026: MatMultTranspose(A,x,y);
8027: }
8028: return 0;
8029: }
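/*
   A sketch of using one rectangular matrix for both interpolation and restriction (P, xcoarse, xfine,
   rfine and rcoarse are illustrative assumptions; P has one row per fine point and one column per
   coarse point).

.vb
     MatInterpolate(P,xcoarse,xfine);      // xfine   = P  * xcoarse
     MatRestrict(P,rfine,rcoarse);         // rcoarse = P' * rfine
.ve
*/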
8031: /*@
8032: MatMatInterpolateAdd - Y = W + A*X or W + A'*X
8034: Neighbor-wise Collective on Mat
8036: Input Parameters:
8037: + mat - the matrix
8038: - w, x - the input dense matrices
8040: Output Parameters:
8041: . y - the output dense matrix
8043: Level: intermediate
8045: Notes:
8046: This allows one to use either the restriction or interpolation (its transpose)
8047:    matrix to do the interpolation. The matrix y can be reused if it was already created with the proper sizes;
8048:    otherwise it will be recreated. y must be initialized to NULL if not supplied.
8050: .seealso: MatInterpolateAdd(), MatMatInterpolate(), MatMatRestrict()
8052: @*/
8053: PetscErrorCode MatMatInterpolateAdd(Mat A,Mat x,Mat w,Mat *y)
8054: {
8055: PetscInt M,N,Mx,Nx,Mo,My = 0,Ny = 0;
8056: PetscBool trans = PETSC_TRUE;
8057: MatReuse reuse = MAT_INITIAL_MATRIX;
8064: MatGetSize(A,&M,&N);
8065: MatGetSize(x,&Mx,&Nx);
8066: if (N == Mx) trans = PETSC_FALSE;
8068: Mo = trans ? N : M;
8069: if (*y) {
8070: MatGetSize(*y,&My,&Ny);
8071: if (Mo == My && Nx == Ny) { reuse = MAT_REUSE_MATRIX; }
8072: else {
8074: MatDestroy(y);
8075: }
8076: }
8078: if (w && *y == w) { /* this is to minimize changes in PCMG */
8079: PetscBool flg;
8081: PetscObjectQuery((PetscObject)*y,"__MatMatIntAdd_w",(PetscObject*)&w);
8082: if (w) {
8083: PetscInt My,Ny,Mw,Nw;
8085: PetscObjectTypeCompare((PetscObject)*y,((PetscObject)w)->type_name,&flg);
8086: MatGetSize(*y,&My,&Ny);
8087: MatGetSize(w,&Mw,&Nw);
8088: if (!flg || My != Mw || Ny != Nw) w = NULL;
8089: }
8090: if (!w) {
8091: MatDuplicate(*y,MAT_COPY_VALUES,&w);
8092: PetscObjectCompose((PetscObject)*y,"__MatMatIntAdd_w",(PetscObject)w);
8093: PetscLogObjectParent((PetscObject)*y,(PetscObject)w);
8094: PetscObjectDereference((PetscObject)w);
8095: } else {
8096: MatCopy(*y,w,UNKNOWN_NONZERO_PATTERN);
8097: }
8098: }
8099: if (!trans) {
8100: MatMatMult(A,x,reuse,PETSC_DEFAULT,y);
8101: } else {
8102: MatTransposeMatMult(A,x,reuse,PETSC_DEFAULT,y);
8103: }
8104: if (w) {
8105: MatAXPY(*y,1.0,w,UNKNOWN_NONZERO_PATTERN);
8106: }
8107: return 0;
8108: }
8110: /*@
8111: MatMatInterpolate - Y = A*X or A'*X
8113: Neighbor-wise Collective on Mat
8115: Input Parameters:
8116: + mat - the matrix
8117: - x - the input dense matrix
8119: Output Parameters:
8120: . y - the output dense matrix
8122: Level: intermediate
8124: Notes:
8125: This allows one to use either the restriction or interpolation (its transpose)
8126:    matrix to do the interpolation. The matrix y can be reused if it was already created with the proper sizes;
8127:    otherwise it will be recreated. y must be initialized to NULL if not supplied.
8129: .seealso: MatInterpolate(), MatRestrict(), MatMatRestrict()
8131: @*/
8132: PetscErrorCode MatMatInterpolate(Mat A,Mat x,Mat *y)
8133: {
8134: MatMatInterpolateAdd(A,x,NULL,y);
8135: return 0;
8136: }
8138: /*@
8139: MatMatRestrict - Y = A*X or A'*X
8141: Neighbor-wise Collective on Mat
8143: Input Parameters:
8144: + mat - the matrix
8145: - x - the input dense matrix
8147: Output Parameters:
8148: . y - the output dense matrix
8150: Level: intermediate
8152: Notes:
8153: This allows one to use either the restriction or interpolation (its transpose)
8154:    matrix to do the restriction. The matrix y can be reused if it was already created with the proper sizes;
8155:    otherwise it will be recreated. y must be initialized to NULL if not supplied.
8157: .seealso: MatRestrict(), MatInterpolate(), MatMatInterpolate()
8158: @*/
8159: PetscErrorCode MatMatRestrict(Mat A,Mat x,Mat *y)
8160: {
8161: MatMatInterpolateAdd(A,x,NULL,y);
8162: return 0;
8163: }
8165: /*@
8166: MatGetNullSpace - retrieves the null space of a matrix.
8168: Logically Collective on Mat
8170: Input Parameters:
8171: + mat - the matrix
8172: - nullsp - the null space object
8174: Level: developer
8176: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8177: @*/
8178: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8179: {
8182: *nullsp = (mat->symmetric_set && mat->symmetric && !mat->nullsp) ? mat->transnullsp : mat->nullsp;
8183: return 0;
8184: }
8186: /*@
8187: MatSetNullSpace - attaches a null space to a matrix.
8189: Logically Collective on Mat
8191: Input Parameters:
8192: + mat - the matrix
8193: - nullsp - the null space object
8195: Level: advanced
8197: Notes:
8198: This null space is used by the KSP linear solvers to solve singular systems.
8200: Overwrites any previous null space that may have been attached. You can remove the null space from the matrix object by calling this routine with an nullsp of NULL
8202: For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) the KSP residuals will not converge to
8203:    zero, but the linear system will still be solved in a least squares sense.
8205: The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8206:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8207:    Similarly R^m is the direct sum of n(A^T) and R(A). Hence the linear system A x = b has a solution only if b is in R(A) (or equivalently b is orthogonal to
8208:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution
8209:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b} where \hat{b} is b orthogonalized against n(A^T).
8210: This \hat{b} can be obtained by calling MatNullSpaceRemove() with the null space of the transpose of the matrix.
8212:    If the matrix is known to be symmetric because it is an SBAIJ matrix or one has called MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE) (or MAT_SYMMETRY_ETERNAL), this
8213: routine also automatically calls MatSetTransposeNullSpace().
8215: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove(),
8216: KSPSetPCSide()
8217: @*/
8218: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8219: {
8222: if (nullsp) PetscObjectReference((PetscObject)nullsp);
8223: MatNullSpaceDestroy(&mat->nullsp);
8224: mat->nullsp = nullsp;
8225: if (mat->symmetric_set && mat->symmetric) {
8226: MatSetTransposeNullSpace(mat,nullsp);
8227: }
8228: return 0;
8229: }
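/*
   A minimal sketch of attaching the constant null space to a singular operator (for example a pure
   Neumann Laplacian); the matrix A is an illustrative assumption.

.vb
     MatNullSpace nullsp;

     MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,0,NULL,&nullsp);
     MatSetNullSpace(A,nullsp);
     MatNullSpaceDestroy(&nullsp);         // the matrix keeps its own reference
.ve
*/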
8231: /*@
8232: MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.
8234: Logically Collective on Mat
8236: Input Parameters:
8237: + mat - the matrix
8238: - nullsp - the null space object
8240: Level: developer
8242: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8243: @*/
8244: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8245: {
8249: *nullsp = (mat->symmetric_set && mat->symmetric && !mat->transnullsp) ? mat->nullsp : mat->transnullsp;
8250: return 0;
8251: }
8253: /*@
8254: MatSetTransposeNullSpace - attaches the null space of a transpose of a matrix to the matrix
8256: Logically Collective on Mat
8258: Input Parameters:
8259: + mat - the matrix
8260: - nullsp - the null space object
8262: Level: advanced
8264: Notes:
8265: This allows solving singular linear systems defined by the transpose of the matrix using KSP solvers with left preconditioning.
8267: See MatSetNullSpace()
8269: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove(), KSPSetPCSide()
8270: @*/
8271: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8272: {
8275: if (nullsp) PetscObjectReference((PetscObject)nullsp);
8276: MatNullSpaceDestroy(&mat->transnullsp);
8277: mat->transnullsp = nullsp;
8278: return 0;
8279: }
8281: /*@
8282: MatSetNearNullSpace - attaches a null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions
8283: This null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.
8285: Logically Collective on Mat
8287: Input Parameters:
8288: + mat - the matrix
8289: - nullsp - the null space object
8291: Level: advanced
8293: Notes:
8294: Overwrites any previous near null space that may have been attached
8296: You can remove the null space by calling this routine with an nullsp of NULL
8298: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8299: @*/
8300: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8301: {
8305: MatCheckPreallocated(mat,1);
8306: if (nullsp) PetscObjectReference((PetscObject)nullsp);
8307: MatNullSpaceDestroy(&mat->nearnullsp);
8308: mat->nearnullsp = nullsp;
8309: return 0;
8310: }
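/*
   A sketch of providing rigid body modes as a near null space for an elasticity operator (the matrix
   A and the coordinate vector coords are illustrative assumptions; coords must have block size equal
   to the spatial dimension).

.vb
     MatNullSpace nearnull;

     MatNullSpaceCreateRigidBody(coords,&nearnull);
     MatSetNearNullSpace(A,nearnull);
     MatNullSpaceDestroy(&nearnull);
.ve
*/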
8312: /*@
8313: MatGetNearNullSpace - Get null space attached with MatSetNearNullSpace()
8315: Not Collective
8317: Input Parameter:
8318: . mat - the matrix
8320: Output Parameter:
8321: . nullsp - the null space object, NULL if not set
8323: Level: developer
8325: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8326: @*/
8327: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8328: {
8332: MatCheckPreallocated(mat,1);
8333: *nullsp = mat->nearnullsp;
8334: return 0;
8335: }
8337: /*@C
8338: MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.
8340: Collective on Mat
8342: Input Parameters:
8343: + mat - the matrix
8344: . row - row/column permutation
8345: -  info - options for the factorization, such as the expected fill factor (>= 1.0) and the level of fill for ICC(k)
8348: Notes:
8349: Probably really in-place only when level of fill is zero, otherwise allocates
8350: new space to store factored matrix and deletes previous memory.
8352: Most users should employ the simplified KSP interface for linear solvers
8353: instead of working directly with matrix algebra routines such as this.
8354: See, e.g., KSPCreate().
8356: Level: developer
8358: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
8360: Developer Note: fortran interface is not autogenerated as the f90
8361: interface definition cannot be generated correctly [due to MatFactorInfo]
8363: @*/
8364: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8365: {
8374: MatCheckPreallocated(mat,1);
8375: (*mat->ops->iccfactor)(mat,row,info);
8376: PetscObjectStateIncrease((PetscObject)mat);
8377: return 0;
8378: }
8380: /*@
8381: MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8382: ghosted ones.
8384: Not Collective
8386: Input Parameters:
8387: + mat - the matrix
8388: - diag - the diagonal values, including ghost ones
8390: Level: developer
8392: Notes:
8393: Works only for MPIAIJ and MPIBAIJ matrices
8395: .seealso: MatDiagonalScale()
8396: @*/
8397: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8398: {
8399: PetscMPIInt size;
8406: PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8407: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8408: if (size == 1) {
8409: PetscInt n,m;
8410: VecGetSize(diag,&n);
8411: MatGetSize(mat,NULL,&m);
8412: if (m == n) {
8413: MatDiagonalScale(mat,NULL,diag);
8414: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8415: } else {
8416: PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8417: }
8418: PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8419: PetscObjectStateIncrease((PetscObject)mat);
8420: return 0;
8421: }
8423: /*@
8424: MatGetInertia - Gets the inertia from a factored matrix
8426: Collective on Mat
8428: Input Parameter:
8429: . mat - the matrix
8431: Output Parameters:
8432: + nneg - number of negative eigenvalues
8433: . nzero - number of zero eigenvalues
8434: - npos - number of positive eigenvalues
8436: Level: advanced
8438: Notes:
8439: Matrix must have been factored by MatCholeskyFactor()
8441: @*/
8442: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8443: {
8449: (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8450: return 0;
8451: }
8453: /* ----------------------------------------------------------------*/
8454: /*@C
8455: MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors
8457: Neighbor-wise Collective on Mats
8459: Input Parameters:
8460: + mat - the factored matrix
8461: - b - the right-hand-side vectors
8463: Output Parameter:
8464: . x - the result vectors
8466: Notes:
8467: The vectors b and x cannot be the same. I.e., one cannot
8468: call MatSolves(A,x,x).
8471: Most users should employ the simplified KSP interface for linear solvers
8472: instead of working directly with matrix algebra routines such as this.
8473: See, e.g., KSPCreate().
8475: Level: developer
8477: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8478: @*/
8479: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8480: {
8485: if (!mat->rmap->N && !mat->cmap->N) return 0;
8488: MatCheckPreallocated(mat,1);
8489: PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8490: (*mat->ops->solves)(mat,b,x);
8491: PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8492: return 0;
8493: }
8495: /*@
8496: MatIsSymmetric - Test whether a matrix is symmetric
8498: Collective on Mat
8500: Input Parameters:
8501: + A - the matrix to test
8502: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact transpose)
8504: Output Parameters:
8505: . flg - the result
8507: Notes:
8508: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
8510: Level: intermediate
8512: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8513: @*/
8514: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool *flg)
8515: {
8519: if (!A->symmetric_set) {
8520: if (!A->ops->issymmetric) {
8521: MatType mattype;
8522: MatGetType(A,&mattype);
8523: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type %s does not support checking for symmetric",mattype);
8524: }
8525: (*A->ops->issymmetric)(A,tol,flg);
8526: if (!tol) {
8527: MatSetOption(A,MAT_SYMMETRIC,*flg);
8528: }
8529: } else if (A->symmetric) {
8530: *flg = PETSC_TRUE;
8531: } else if (!tol) {
8532: *flg = PETSC_FALSE;
8533: } else {
8534: if (!A->ops->issymmetric) {
8535: MatType mattype;
8536: MatGetType(A,&mattype);
8537: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type %s does not support checking for symmetric",mattype);
8538: }
8539: (*A->ops->issymmetric)(A,tol,flg);
8540: }
8541: return 0;
8542: }
8544: /*@
8545: MatIsHermitian - Test whether a matrix is Hermitian
8547: Collective on Mat
8549: Input Parameters:
8550: + A - the matrix to test
8551: - tol - difference between value and its transpose less than this amount counts as equal (use 0.0 for exact Hermitian)
8553: Output Parameters:
8554: . flg - the result
8556: Level: intermediate
8558: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8559: MatIsSymmetricKnown(), MatIsSymmetric()
8560: @*/
8561: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool *flg)
8562: {
8566: if (!A->hermitian_set) {
8567: if (!A->ops->ishermitian) {
8568: MatType mattype;
8569: MatGetType(A,&mattype);
8570: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type %s does not support checking for hermitian",mattype);
8571: }
8572: (*A->ops->ishermitian)(A,tol,flg);
8573: if (!tol) {
8574: MatSetOption(A,MAT_HERMITIAN,*flg);
8575: }
8576: } else if (A->hermitian) {
8577: *flg = PETSC_TRUE;
8578: } else if (!tol) {
8579: *flg = PETSC_FALSE;
8580: } else {
8581: if (!A->ops->ishermitian) {
8582: MatType mattype;
8583: MatGetType(A,&mattype);
8584: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type %s does not support checking for hermitian",mattype);
8585: }
8586: (*A->ops->ishermitian)(A,tol,flg);
8587: }
8588: return 0;
8589: }
8591: /*@
8592: MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.
8594: Not Collective
8596: Input Parameter:
8597: . A - the matrix to check
8599: Output Parameters:
8600: + set - if the symmetric flag is set (this tells you if the next flag is valid)
8601: - flg - the result
8603: Level: advanced
8605: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8606: if you want it explicitly checked
8608: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8609: @*/
8610: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool *set,PetscBool *flg)
8611: {
8615: if (A->symmetric_set) {
8616: *set = PETSC_TRUE;
8617: *flg = A->symmetric;
8618: } else {
8619: *set = PETSC_FALSE;
8620: }
8621: return 0;
8622: }
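/*
   A sketch of checking the cached symmetry flag first and only testing the values when the flag is
   not set (the matrix A is an illustrative assumption).

.vb
     PetscBool set,flg;

     MatIsSymmetricKnown(A,&set,&flg);
     if (!set) {
       MatIsSymmetric(A,0.0,&flg);         // explicit (potentially expensive) test
     }
.ve
*/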
8624: /*@
8625: MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.
8627: Not Collective
8629: Input Parameter:
8630: . A - the matrix to check
8632: Output Parameters:
8633: + set - if the hermitian flag is set (this tells you if the next flag is valid)
8634: - flg - the result
8636: Level: advanced
8638: Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8639: if you want it explicitly checked
8641: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8642: @*/
8643: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool *set,PetscBool *flg)
8644: {
8648: if (A->hermitian_set) {
8649: *set = PETSC_TRUE;
8650: *flg = A->hermitian;
8651: } else {
8652: *set = PETSC_FALSE;
8653: }
8654: return 0;
8655: }
8657: /*@
8658: MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric
8660: Collective on Mat
8662: Input Parameter:
8663: . A - the matrix to test
8665: Output Parameters:
8666: . flg - the result
8668: Level: intermediate
8670: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8671: @*/
8672: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool *flg)
8673: {
8676: if (!A->structurally_symmetric_set) {
8678: (*A->ops->isstructurallysymmetric)(A,flg);
8679: MatSetOption(A,MAT_STRUCTURALLY_SYMMETRIC,*flg);
8680: } else *flg = A->structurally_symmetric;
8681: return 0;
8682: }
8684: /*@
8685: MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8686: to be communicated to other processors during the MatAssemblyBegin/End() process
8688: Not collective
8690: Input Parameter:
8691: .  mat - the matrix
8693: Output Parameters:
8694: + nstash - the size of the stash
8695: . reallocs - the number of additional mallocs incurred.
8696: . bnstash - the size of the block stash
8697: -   breallocs - the number of additional mallocs incurred in the block stash
8699: Level: advanced
8701: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()
8703: @*/
8704: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8705: {
8706: MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8707: MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8708: return 0;
8709: }
8711: /*@C
8712: MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8713: parallel layout
8715: Collective on Mat
8717: Input Parameter:
8718: . mat - the matrix
8720: Output Parameters:
8721: + right - (optional) vector that the matrix can be multiplied against
8722: - left - (optional) vector that the matrix vector product can be stored in
8724: Notes:
8725: The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().
8728:     These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed
8730: Level: advanced
8732: .seealso: MatCreate(), VecDestroy()
8733: @*/
8734: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8735: {
8738: if (mat->ops->getvecs) {
8739: (*mat->ops->getvecs)(mat,right,left);
8740: } else {
8741: PetscInt rbs,cbs;
8742: MatGetBlockSizes(mat,&rbs,&cbs);
8743: if (right) {
8745: VecCreate(PetscObjectComm((PetscObject)mat),right);
8746: VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8747: VecSetBlockSize(*right,cbs);
8748: VecSetType(*right,mat->defaultvectype);
8749: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
8750: if (mat->boundtocpu && mat->bindingpropagates) {
8751: VecSetBindingPropagates(*right,PETSC_TRUE);
8752: VecBindToCPU(*right,PETSC_TRUE);
8753: }
8754: #endif
8755: PetscLayoutReference(mat->cmap,&(*right)->map);
8756: }
8757: if (left) {
8759: VecCreate(PetscObjectComm((PetscObject)mat),left);
8760: VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8761: VecSetBlockSize(*left,rbs);
8762: VecSetType(*left,mat->defaultvectype);
8763: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
8764: if (mat->boundtocpu && mat->bindingpropagates) {
8765: VecSetBindingPropagates(*left,PETSC_TRUE);
8766: VecBindToCPU(*left,PETSC_TRUE);
8767: }
8768: #endif
8769: PetscLayoutReference(mat->rmap,&(*left)->map);
8770: }
8771: }
8772: return 0;
8773: }
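/*
   A minimal sketch of creating compatible work vectors for a matrix-vector product (the matrix A is
   an illustrative assumption).

.vb
     Vec x,b;

     MatCreateVecs(A,&x,&b);               // x matches the columns of A, b matches the rows
     MatMult(A,x,b);
     VecDestroy(&x);
     VecDestroy(&b);
.ve
*/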
8775: /*@C
8776: MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8777: with default values.
8779: Not Collective
8781: Input Parameters:
8782: . info - the MatFactorInfo data structure
8784: Notes:
8785: The solvers are generally used through the KSP and PC objects, for example
8786: PCLU, PCILU, PCCHOLESKY, PCICC
8788: Level: developer
8790: .seealso: MatFactorInfo
8792: Developer Note: fortran interface is not autogenerated as the f90
8793: interface definition cannot be generated correctly [due to MatFactorInfo]
8795: @*/
8797: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8798: {
8799: PetscMemzero(info,sizeof(MatFactorInfo));
8800: return 0;
8801: }
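/*
   A sketch of a direct LU factorization that fills a MatFactorInfo structure (the matrix A is an
   illustrative assumption; the ordering and the fill value are arbitrary choices).

.vb
     Mat           F;
     IS            rowperm,colperm;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     info.fill = 2.0;                      // expected fill ratio
     MatGetOrdering(A,MATORDERINGND,&rowperm,&colperm);
     MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
     MatLUFactorSymbolic(F,A,rowperm,colperm,&info);
     MatLUFactorNumeric(F,A,&info);
     ISDestroy(&rowperm);
     ISDestroy(&colperm);
.ve
*/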
8803: /*@
8804: MatFactorSetSchurIS - Set indices corresponding to the Schur complement you wish to have computed
8806: Collective on Mat
8808: Input Parameters:
8809: + mat - the factored matrix
8810: - is - the index set defining the Schur indices (0-based)
8812: Notes:
8813: Call MatFactorSolveSchurComplement() or MatFactorSolveSchurComplementTranspose() after this call to solve a Schur complement system.
8815: You can call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() after this call.
8817: Level: developer
8819: .seealso: MatGetFactor(), MatFactorGetSchurComplement(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSolveSchurComplement(),
8820: MatFactorSolveSchurComplementTranspose(), MatFactorSolveSchurComplement()
8822: @*/
8823: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8824: {
8825: PetscErrorCode (*f)(Mat,IS);
8833: PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8835: MatDestroy(&mat->schur);
8836: (*f)(mat,is);
8838: return 0;
8839: }
8841: /*@
8842: MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step
8844: Logically Collective on Mat
8846: Input Parameters:
8847: + F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
8848: . S - location where to return the Schur complement, can be NULL
8849: - status - the status of the Schur complement matrix, can be NULL
8851: Notes:
8852: You must call MatFactorSetSchurIS() before calling this routine.
8854: The routine provides a copy of the Schur matrix stored within the solver data structures.
8855: The caller must destroy the object when it is no longer needed.
8856: If MatFactorInvertSchurComplement() has been called, the routine gets back the inverse.
8858: Use MatFactorGetSchurComplement() to get access to the Schur complement matrix inside the factored matrix instead of making a copy of it (which this function does)
8860: Developer Notes:
8861: The reason this routine exists is because the representation of the Schur complement within the factor matrix may be different than a standard PETSc
8862: matrix representation and we normally do not want to use the time or memory to make a copy as a regular PETSc matrix.
8864: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
8866: Level: advanced
8870: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorSchurStatus
8871: @*/
8872: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8873: {
8877: if (S) {
8878: PetscErrorCode (*f)(Mat,Mat*);
8880: PetscObjectQueryFunction((PetscObject)F,"MatFactorCreateSchurComplement_C",&f);
8881: if (f) {
8882: (*f)(F,S);
8883: } else {
8884: MatDuplicate(F->schur,MAT_COPY_VALUES,S);
8885: }
8886: }
8887: if (status) *status = F->schur_status;
8888: return 0;
8889: }
8891: /*@
8892: MatFactorGetSchurComplement - Gets access to a Schur complement matrix using the current Schur data within a factored matrix
8894: Logically Collective on Mat
8896: Input Parameters:
8897: + F - the factored matrix obtained by calling MatGetFactor()
8898: . *S - location where to return the Schur complement, can be NULL
8899: - status - the status of the Schur complement matrix, can be NULL
8901: Notes:
8902: You must call MatFactorSetSchurIS() before calling this routine.
8904: Schur complement mode is currently implemented for sequential matrices.
8905:    The routine returns the Schur complement stored within the data structures of the solver.
8906: If MatFactorInvertSchurComplement() has previously been called, the returned matrix is actually the inverse of the Schur complement.
8907: The returned matrix should not be destroyed; the caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.
8909: Use MatFactorCreateSchurComplement() to create a copy of the Schur complement matrix that is within a factored matrix
8911: See MatCreateSchurComplement() or MatGetSchurComplement() for ways to create virtual or approximate Schur complements.
8913: Level: advanced
8917: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8918: @*/
8919: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S,MatFactorSchurStatus* status)
8920: {
8924: if (S) *S = F->schur;
8925: if (status) *status = F->schur_status;
8926: return 0;
8927: }
8929: /*@
8930: MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement
8932: Logically Collective on Mat
8934: Input Parameters:
8935: + F - the factored matrix obtained by calling MatGetFactor()
8936: . *S - location where the Schur complement is stored
8937: - status - the status of the Schur complement matrix (see MatFactorSchurStatus)
8941: Level: advanced
8945: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement(), MatFactorSchurStatus
8946: @*/
8947: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S,MatFactorSchurStatus status)
8948: {
8950: if (S) {
8952: *S = NULL;
8953: }
8954: F->schur_status = status;
8955: MatFactorUpdateSchurStatus_Private(F);
8956: return 0;
8957: }
8959: /*@
8960: MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step
8962: Logically Collective on Mat
8964: Input Parameters:
8965: + F - the factored matrix obtained by calling MatGetFactor()
8966: . rhs - location where the right hand side of the Schur complement system is stored
8967: - sol - location where the solution of the Schur complement system has to be returned
8969: Notes:
8970: The sizes of the vectors should match the size of the Schur complement
8972: Must be called after MatFactorSetSchurIS()
8974: Level: advanced
8978: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplement()
8979: @*/
8980: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
8981: {
8990: MatFactorFactorizeSchurComplement(F);
8991: switch (F->schur_status) {
8992: case MAT_FACTOR_SCHUR_FACTORED:
8993: MatSolveTranspose(F->schur,rhs,sol);
8994: break;
8995: case MAT_FACTOR_SCHUR_INVERTED:
8996: MatMultTranspose(F->schur,rhs,sol);
8997: break;
8998: default:
8999: SETERRQ(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %d",F->schur_status);
9000: }
9001: return 0;
9002: }
9004: /*@
9005: MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step
9007: Logically Collective on Mat
9009: Input Parameters:
9010: + F - the factored matrix obtained by calling MatGetFactor()
9011: . rhs - location where the right hand side of the Schur complement system is stored
9012: - sol - location where the solution of the Schur complement system has to be returned
9014: Notes:
9015: The sizes of the vectors should match the size of the Schur complement
9017: Must be called after MatFactorSetSchurIS()
9019: Level: advanced
9023: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorSolveSchurComplementTranspose()
9024: @*/
9025: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
9026: {
9035: MatFactorFactorizeSchurComplement(F);
9036: switch (F->schur_status) {
9037: case MAT_FACTOR_SCHUR_FACTORED:
9038: MatSolve(F->schur,rhs,sol);
9039: break;
9040: case MAT_FACTOR_SCHUR_INVERTED:
9041: MatMult(F->schur,rhs,sol);
9042: break;
9043: default:
9044: SETERRQ(PetscObjectComm((PetscObject)F),PETSC_ERR_SUP,"Unhandled MatFactorSchurStatus %d",F->schur_status);
9045: }
9046: return 0;
9047: }
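/*
   A sketch of the Schur complement workflow with an external factorization package (MATSOLVERMUMPS
   is an illustrative assumption and requires PETSc to have been configured with MUMPS; A, schuris,
   rhs and sol are assumed to exist, with rhs and sol sized to the Schur complement).

.vb
     Mat           F;
     MatFactorInfo info;

     MatFactorInfoInitialize(&info);
     MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
     MatFactorSetSchurIS(F,schuris);       // set on the factor before the factorization
     MatLUFactorSymbolic(F,A,NULL,NULL,&info);
     MatLUFactorNumeric(F,A,&info);
     MatFactorSolveSchurComplement(F,rhs,sol);
.ve
*/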
9049: /*@
9050: MatFactorInvertSchurComplement - Invert the Schur complement matrix computed during the factorization step
9052: Logically Collective on Mat
9054: Input Parameters:
9055: . F - the factored matrix obtained by calling MatGetFactor()
9057: Notes:
9058: Must be called after MatFactorSetSchurIS().
9060: Call MatFactorGetSchurComplement() or MatFactorCreateSchurComplement() AFTER this call to actually compute the inverse and get access to it.
9062: Level: advanced
9066: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement(), MatFactorCreateSchurComplement()
9067: @*/
9068: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9069: {
9072: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED) return 0;
9073: MatFactorFactorizeSchurComplement(F);
9074: MatFactorInvertSchurComplement_Private(F);
9075: F->schur_status = MAT_FACTOR_SCHUR_INVERTED;
9076: return 0;
9077: }
9079: /*@
9080: MatFactorFactorizeSchurComplement - Factorize the Schur complement matrix computed during the factorization step
9082: Logically Collective on Mat
9084: Input Parameters:
9085: . F - the factored matrix obtained by calling MatGetFactor()
9087: Notes:
9088: Must be called after MatFactorSetSchurIS().
9090: Level: advanced
9094: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorInvertSchurComplement()
9095: @*/
9096: PetscErrorCode MatFactorFactorizeSchurComplement(Mat F)
9097: {
9100: if (F->schur_status == MAT_FACTOR_SCHUR_INVERTED || F->schur_status == MAT_FACTOR_SCHUR_FACTORED) return 0;
9101: MatFactorFactorizeSchurComplement_Private(F);
9102: F->schur_status = MAT_FACTOR_SCHUR_FACTORED;
9103: return 0;
9104: }
9106: /*@
9107: MatPtAP - Creates the matrix product C = P^T * A * P
9109: Neighbor-wise Collective on Mat
9111: Input Parameters:
9112: + A - the matrix
9113: . P - the projection matrix
9114: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9115: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate;
9116: if the result is a dense matrix this is irrelevant
9118: Output Parameters:
9119: . C - the product matrix
9121: Notes:
9122: C will be created and must be destroyed by the user with MatDestroy().
9124:    For matrix types without a special implementation, the function falls back to MatMatMult() followed by MatTransposeMatMult().
9126: Level: intermediate
9128: .seealso: MatMatMult(), MatRARt()
9129: @*/
9130: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9131: {
9132: if (scall == MAT_REUSE_MATRIX) MatCheckProduct(*C,5);
9135: if (scall == MAT_INITIAL_MATRIX) {
9136: MatProductCreate(A,P,NULL,C);
9137: MatProductSetType(*C,MATPRODUCT_PtAP);
9138: MatProductSetAlgorithm(*C,"default");
9139: MatProductSetFill(*C,fill);
9141: (*C)->product->api_user = PETSC_TRUE;
9142: MatProductSetFromOptions(*C);
9144: MatProductSymbolic(*C);
9145: } else { /* scall == MAT_REUSE_MATRIX */
9146: MatProductReplaceMats(A,P,NULL,*C);
9147: }
9149: MatProductNumeric(*C);
9150: if (A->symmetric) {
9151: if (A->spd) {
9152: MatSetOption(*C,MAT_SPD,PETSC_TRUE);
9153: } else {
9154: MatSetOption(*C,MAT_SYMMETRIC,PETSC_TRUE);
9155: }
9156: }
9157: return 0;
9158: }
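/*
   A minimal sketch of a Galerkin triple product and its reuse when only the numerical values of A
   change (the matrices A and P are illustrative assumptions).

.vb
     Mat C;

     MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   // C = P^T * A * P
     // ... modify the values of A, keeping its nonzero structure ...
     MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
     MatDestroy(&C);
.ve
*/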
9160: /*@
9161: MatRARt - Creates the matrix product C = R * A * R^T
9163: Neighbor-wise Collective on Mat
9165: Input Parameters:
9166: + A - the matrix
9167: . R - the projection matrix
9168: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9169: - fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate
9170: if the result is a dense matrix this is irrelevant
9172: Output Parameters:
9173: . C - the product matrix
9175: Notes:
9176: C will be created and must be destroyed by the user with MatDestroy().
9178: This routine is currently only implemented for pairs of AIJ matrices and classes
9179: which inherit from AIJ. Due to PETSc sparse matrix block row distribution among processes,
9180: parallel MatRARt is implemented via explicit transpose of R, which could be very expensive.
9181: We recommend using MatPtAP().
9183: Level: intermediate
9185: .seealso: MatMatMult(), MatPtAP()
9186: @*/
9187: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9188: {
9189: if (scall == MAT_REUSE_MATRIX) MatCheckProduct(*C,5);
9192: if (scall == MAT_INITIAL_MATRIX) {
9193: MatProductCreate(A,R,NULL,C);
9194: MatProductSetType(*C,MATPRODUCT_RARt);
9195: MatProductSetAlgorithm(*C,"default");
9196: MatProductSetFill(*C,fill);
9198: (*C)->product->api_user = PETSC_TRUE;
9199: MatProductSetFromOptions(*C);
9201: MatProductSymbolic(*C);
9202: } else { /* scall == MAT_REUSE_MATRIX */
9203: MatProductReplaceMats(A,R,NULL,*C);
9204: }
9206: MatProductNumeric(*C);
9207: if (A->symmetric_set && A->symmetric) {
9208: MatSetOption(*C,MAT_SYMMETRIC,PETSC_TRUE);
9209: }
9210: return 0;
9211: }
9213: static PetscErrorCode MatProduct_Private(Mat A,Mat B,MatReuse scall,PetscReal fill,MatProductType ptype, Mat *C)
9214: {
9217: if (scall == MAT_INITIAL_MATRIX) {
9218: PetscInfo(A,"Calling MatProduct API with MAT_INITIAL_MATRIX and product type %s\n",MatProductTypes[ptype]);
9219: MatProductCreate(A,B,NULL,C);
9220: MatProductSetType(*C,ptype);
9221: MatProductSetAlgorithm(*C,MATPRODUCTALGORITHMDEFAULT);
9222: MatProductSetFill(*C,fill);
9224: (*C)->product->api_user = PETSC_TRUE;
9225: MatProductSetFromOptions(*C);
9226: MatProductSymbolic(*C);
9227: } else { /* scall == MAT_REUSE_MATRIX */
9228: Mat_Product *product = (*C)->product;
9229: PetscBool isdense;
9231: PetscObjectBaseTypeCompareAny((PetscObject)(*C),&isdense,MATSEQDENSE,MATMPIDENSE,"");
9232: if (isdense && product && product->type != ptype) {
9233: MatProductClear(*C);
9234: product = NULL;
9235: }
9236: PetscInfo(A,"Calling MatProduct API with MAT_REUSE_MATRIX %s product present and product type %s\n",product ? "with" : "without",MatProductTypes[ptype]);
9237:     if (!product) { /* user provided the dense matrix *C without calling MatProductCreate() or reusing it from previous calls */
9238: if (isdense) {
9239: MatProductCreate_Private(A,B,NULL,*C);
9240: product = (*C)->product;
9241: product->fill = fill;
9242: product->api_user = PETSC_TRUE;
9243: product->clear = PETSC_TRUE;
9245: MatProductSetType(*C,ptype);
9246: MatProductSetFromOptions(*C);
9248: MatProductSymbolic(*C);
9249: } else SETERRQ(PetscObjectComm((PetscObject)(*C)),PETSC_ERR_SUP,"Call MatProductCreate() first");
9250: } else { /* user may change input matrices A or B when REUSE */
9251: MatProductReplaceMats(A,B,NULL,*C);
9252: }
9253: }
9254: MatProductNumeric(*C);
9255: return 0;
9256: }
9258: /*@
9259: MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.
9261: Neighbor-wise Collective on Mat
9263: Input Parameters:
9264: + A - the left matrix
9265: . B - the right matrix
9266: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9267: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate
9268: if the result is a dense matrix this is irrelevant
9270: Output Parameters:
9271: . C - the product matrix
9273: Notes:
9274: Unless scall is MAT_REUSE_MATRIX C will be created.
9276: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call and C was obtained from a previous
9277: call to this function with MAT_INITIAL_MATRIX.
9279: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value actually needed.
9281: If you have many matrices with the same non-zero structure to multiply, you should use MatProductCreate()/MatProductSymbolic()/MatProductReplaceMats(), and call MatProductNumeric() repeatedly.
9283: In the special case where matrix B (and hence C) are dense you can create the correctly sized matrix C yourself and then call this routine with MAT_REUSE_MATRIX, rather than first having MatMatMult() create it for you. You can NEVER do this if the matrix C is sparse.
9285: Example of Usage:
9286: .vb
9287: MatProductCreate(A,B,NULL,&C);
9288: MatProductSetType(C,MATPRODUCT_AB);
9289: MatProductSymbolic(C);
9290: MatProductNumeric(C); // compute C=A * B
9291: MatProductReplaceMats(A1,B1,NULL,C); // compute C=A1 * B1
9292: MatProductNumeric(C);
9293: MatProductReplaceMats(A2,NULL,NULL,C); // compute C=A2 * B1
9294: MatProductNumeric(C);
9295: .ve
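   For the special case above in which B (and hence C) are dense, a minimal sketch of creating C yourself; the local sizes are taken from A and B so the layouts match:
.vb
      Mat      C;
      PetscInt am,bn;

      MatGetLocalSize(A,&am,NULL);
      MatGetLocalSize(B,NULL,&bn);
      MatCreateDense(PetscObjectComm((PetscObject)A),am,bn,PETSC_DETERMINE,PETSC_DETERMINE,NULL,&C);
      MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);
.ve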
9297: Level: intermediate
9299: .seealso: MatTransposeMatMult(), MatMatTransposeMult(), MatPtAP(), MatProductCreate(), MatProductSymbolic(), MatProductReplaceMats(), MatProductNumeric()
9300: @*/
9301: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9302: {
9303: MatProduct_Private(A,B,scall,fill,MATPRODUCT_AB,C);
9304: return 0;
9305: }
9307: /*@
9308: MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.
9310: Neighbor-wise Collective on Mat
9312: Input Parameters:
9313: + A - the left matrix
9314: . B - the right matrix
9315: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9316: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9318: Output Parameters:
9319: . C - the product matrix
9321: Notes:
9322: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9324: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call
9326: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9327: actually needed.
9329: This routine is currently only implemented for pairs of SeqAIJ matrices, for the SeqDense class,
9330: and for pairs of MPIDense matrices.
9332: Options Database Keys:
9333: . -matmattransmult_mpidense_mpidense_via {allgatherv,cyclic} - Choose between algorithms for MPIDense matrices: the
9334:     first redundantly copies the transposed B matrix on each process and requires O(log P) communication complexity;
9335:     the second never stores more than one portion of the B matrix at a time but requires O(P) communication complexity.
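   For example, the algorithm for MPIDense matrices can be selected at run time (program name and rank count are placeholders):
.vb
      mpiexec -n 4 ./your_app -matmattransmult_mpidense_mpidense_via cyclic
.ve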
9337: Level: intermediate
9339: .seealso: MatMatMult(), MatTransposeMatMult() MatPtAP()
9340: @*/
9341: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9342: {
9343: MatProduct_Private(A,B,scall,fill,MATPRODUCT_ABt,C);
9344: return 0;
9345: }
9347: /*@
9348: MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.
9350: Neighbor-wise Collective on Mat
9352: Input Parameters:
9353: + A - the left matrix
9354: . B - the right matrix
9355: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9356: - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known
9358: Output Parameters:
9359: . C - the product matrix
9361: Notes:
9362: C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().
9364: MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call.
9366: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9367: actually needed.
9369:    This routine is currently implemented for pairs of AIJ matrices, pairs of SeqDense matrices, and classes
9370:    which inherit from SeqAIJ. C will be of the same type as the input matrices.
9372: Level: intermediate
9374: .seealso: MatMatMult(), MatMatTransposeMult(), MatPtAP()
9375: @*/
9376: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9377: {
9378: MatProduct_Private(A,B,scall,fill,MATPRODUCT_AtB,C);
9379: return 0;
9380: }
9382: /*@
9383: MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.
9385: Neighbor-wise Collective on Mat
9387: Input Parameters:
9388: + A - the left matrix
9389: . B - the middle matrix
9390: . C - the right matrix
9391: . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9392: - fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B)+nnz(C)), use PETSC_DEFAULT if you do not have a good estimate
9393: if the result is a dense matrix this is irrelevant
9395: Output Parameters:
9396: . D - the product matrix
9398: Notes:
9399: Unless scall is MAT_REUSE_MATRIX D will be created.
9401: MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call
9403: To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9404: actually needed.
9406:    If you have many matrices with the same nonzero structure to multiply, you
9407:    should use MAT_REUSE_MATRIX in all calls but the first, or use MatProductCreate()/MatProductSymbolic()/MatProductReplaceMats() and call MatProductNumeric() repeatedly.
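   For example, a sketch of the reuse pattern, assuming A, B and C are assembled and keep their nonzero patterns between calls:
.vb
      Mat D;

      MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);
      // ... update the numerical values of A, B or C ...
      MatMatMatMult(A,B,C,MAT_REUSE_MATRIX,PETSC_DEFAULT,&D);
      MatDestroy(&D);
.ve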
9409: Level: intermediate
9411: .seealso: MatMatMult(), MatPtAP()
9412: @*/
9413: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
9414: {
9415: if (scall == MAT_REUSE_MATRIX) MatCheckProduct(*D,6);
9418: if (scall == MAT_INITIAL_MATRIX) {
9419: MatProductCreate(A,B,C,D);
9420: MatProductSetType(*D,MATPRODUCT_ABC);
9421: MatProductSetAlgorithm(*D,"default");
9422: MatProductSetFill(*D,fill);
9424: (*D)->product->api_user = PETSC_TRUE;
9425: MatProductSetFromOptions(*D);
9427: MatProductSymbolic(*D);
9428: } else { /* user may change input matrices when REUSE */
9429: MatProductReplaceMats(A,B,C,*D);
9430: }
9431: MatProductNumeric(*D);
9432: return 0;
9433: }
9435: /*@
9436: MatCreateRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.
9438: Collective on Mat
9440: Input Parameters:
9441: + mat - the matrix
9442: . nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
9443: . subcomm - MPI communicator split from the communicator in which mat resides (or MPI_COMM_NULL if nsubcomm is used)
9444: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9446: Output Parameter:
9447: . matredundant - redundant matrix
9449: Notes:
9450:    MAT_REUSE_MATRIX can only be used when the nonzero structure of the
9451:    original matrix has not changed since the last call to MatCreateRedundantMatrix().
9453: This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
9454: calling it.
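   Example of Usage:
     A sketch that creates two redundant copies of mat, each living on a subcommunicator chosen by PETSc, and later refreshes the values:
.vb
      Mat Ared;

      MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&Ared);
      // ... change the values of mat, keeping its nonzero structure ...
      MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_REUSE_MATRIX,&Ared);
      MatDestroy(&Ared);
.ve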
9456: Level: advanced
9458: .seealso: MatDestroy()
9459: @*/
9460: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
9461: {
9462: MPI_Comm comm;
9463: PetscMPIInt size;
9464: PetscInt mloc_sub,nloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
9465: Mat_Redundant *redund=NULL;
9466: PetscSubcomm psubcomm=NULL;
9467: MPI_Comm subcomm_in=subcomm;
9468: Mat *matseq;
9469: IS isrow,iscol;
9470: PetscBool newsubcomm=PETSC_FALSE;
9473: if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
9476: }
9478: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
9479: if (size == 1 || nsubcomm == 1) {
9480: if (reuse == MAT_INITIAL_MATRIX) {
9481: MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
9482: } else {
9484: MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
9485: }
9486: return 0;
9487: }
9491: MatCheckPreallocated(mat,1);
9493: PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
9494: if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
9495: /* create psubcomm, then get subcomm */
9496: PetscObjectGetComm((PetscObject)mat,&comm);
9497: MPI_Comm_size(comm,&size);
9500: PetscSubcommCreate(comm,&psubcomm);
9501: PetscSubcommSetNumber(psubcomm,nsubcomm);
9502: PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
9503: PetscSubcommSetFromOptions(psubcomm);
9504: PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
9505: newsubcomm = PETSC_TRUE;
9506: PetscSubcommDestroy(&psubcomm);
9507: }
9509: /* get isrow, iscol and a local sequential matrix matseq[0] */
9510: if (reuse == MAT_INITIAL_MATRIX) {
9511: mloc_sub = PETSC_DECIDE;
9512: nloc_sub = PETSC_DECIDE;
9513: if (bs < 1) {
9514: PetscSplitOwnership(subcomm,&mloc_sub,&M);
9515: PetscSplitOwnership(subcomm,&nloc_sub,&N);
9516: } else {
9517: PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
9518: PetscSplitOwnershipBlock(subcomm,bs,&nloc_sub,&N);
9519: }
9520: MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
9521: rstart = rend - mloc_sub;
9522: ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
9523: ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
9524: } else { /* reuse == MAT_REUSE_MATRIX */
9526: /* retrieve subcomm */
9527: PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
9528: redund = (*matredundant)->redundant;
9529: isrow = redund->isrow;
9530: iscol = redund->iscol;
9531: matseq = redund->matseq;
9532: }
9533: MatCreateSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);
9535: /* get matredundant over subcomm */
9536: if (reuse == MAT_INITIAL_MATRIX) {
9537: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],nloc_sub,reuse,matredundant);
9539: /* create a supporting struct and attach it to C for reuse */
9540: PetscNewLog(*matredundant,&redund);
9541: (*matredundant)->redundant = redund;
9542: redund->isrow = isrow;
9543: redund->iscol = iscol;
9544: redund->matseq = matseq;
9545: if (newsubcomm) {
9546: redund->subcomm = subcomm;
9547: } else {
9548: redund->subcomm = MPI_COMM_NULL;
9549: }
9550: } else {
9551: MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
9552: }
9553: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
9554: if (matseq[0]->boundtocpu && matseq[0]->bindingpropagates) {
9555: MatBindToCPU(*matredundant,PETSC_TRUE);
9556: MatSetBindingPropagates(*matredundant,PETSC_TRUE);
9557: }
9558: #endif
9559: PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
9560: return 0;
9561: }
9563: /*@C
9564:    MatGetMultiProcBlock - Creates multiple 'parallel submatrices' (as used, for example, by block Jacobi) from
9565:    a given 'mat' object. Each submatrix can span multiple MPI ranks.
9567: Collective on Mat
9569: Input Parameters:
9570: + mat - the matrix
9571: .  subcomm - the subcommunicator obtained by MPI_Comm_split(comm,...)
9572: - scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9574: Output Parameter:
9575: .  subMat - the parallel submatrix; each submatrix spans the given subcomm
9577: Notes:
9578:    The submatrix partition across processes is dictated by 'subComm', a
9579:    communicator obtained by MPI_Comm_split(comm). The split
9580:    is not restricted to grouping consecutive ranks of the original communicator.
9582:    Due to the MPI_Comm_split() usage, the parallel layout of the submatrices
9583:    maps directly to the layout of the original matrix [with respect to the local
9584:    row,col partitioning]. So the original 'DiagonalMat' naturally maps
9585:    into the 'DiagonalMat' of the subMat, hence it is used directly from
9586:    the subMat. However, the offDiagMat loses some columns, and these are
9587:    reconstructed with MatSetValues().
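   Example of Usage:
     A sketch that splits the ranks of mat's communicator into pairs and extracts the corresponding parallel submatrices (the grouping by rank/2 is only an example):
.vb
      MPI_Comm    subcomm;
      PetscMPIInt rank;
      Mat         subA;

      MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);
      MPI_Comm_split(PetscObjectComm((PetscObject)mat),rank/2,rank,&subcomm);
      MatGetMultiProcBlock(mat,subcomm,MAT_INITIAL_MATRIX,&subA);
      // ... after the values of mat change ...
      MatGetMultiProcBlock(mat,subcomm,MAT_REUSE_MATRIX,&subA);
      MatDestroy(&subA);
      MPI_Comm_free(&subcomm);
.ve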
9589: Level: advanced
9591: .seealso: MatCreateSubMatrices()
9592: @*/
9593: PetscErrorCode MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
9594: {
9595: PetscMPIInt commsize,subCommSize;
9597: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
9598: MPI_Comm_size(subComm,&subCommSize);
9602: PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
9603: (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
9604: PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
9605: return 0;
9606: }
9608: /*@
9609: MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering
9611: Not Collective
9613: Input Parameters:
9614: + mat - matrix to extract local submatrix from
9615: . isrow - local row indices for submatrix
9616: - iscol - local column indices for submatrix
9618: Output Parameter:
9619: . submat - the submatrix
9621: Level: intermediate
9623: Notes:
9624: The submat should be returned with MatRestoreLocalSubMatrix().
9626: Depending on the format of mat, the returned submat may not implement MatMult(). Its communicator may be
9627: the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.
9629: The submat always implements MatSetValuesLocal(). If isrow and iscol have the same block size, then
9630: MatSetValuesBlockedLocal() will also be implemented.
9632:    The mat must have had an ISLocalToGlobalMapping provided to it with MatSetLocalToGlobalMapping(). Note that
9633: matrices obtained with DMCreateMatrix() generally already have the local to global mapping provided.
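   Example of Usage:
     A sketch of adding a value to a submatrix using local indices; isrow, iscol and the indices are placeholders, and the matrix must already have a local-to-global mapping:
.vb
      Mat         sub;
      PetscScalar v   = 1.0;
      PetscInt    row = 0,col = 0;   // local indices into isrow and iscol

      MatGetLocalSubMatrix(mat,isrow,iscol,&sub);
      MatSetValuesLocal(sub,1,&row,1,&col,&v,ADD_VALUES);
      MatRestoreLocalSubMatrix(mat,isrow,iscol,&sub);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve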
9635: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef(), MatSetLocalToGlobalMapping()
9636: @*/
9637: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9638: {
9646: if (mat->ops->getlocalsubmatrix) {
9647: (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
9648: } else {
9649: MatCreateLocalRef(mat,isrow,iscol,submat);
9650: }
9651: return 0;
9652: }
9654: /*@
9655: MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering
9657: Not Collective
9659: Input Parameters:
9660: + mat - matrix to extract local submatrix from
9661: . isrow - local row indices for submatrix
9662: . iscol - local column indices for submatrix
9663: - submat - the submatrix
9665: Level: intermediate
9667: .seealso: MatGetLocalSubMatrix()
9668: @*/
9669: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
9670: {
9676: if (*submat) {
9678: }
9680: if (mat->ops->restorelocalsubmatrix) {
9681: (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
9682: } else {
9683: MatDestroy(submat);
9684: }
9685: *submat = NULL;
9686: return 0;
9687: }
9689: /* --------------------------------------------------------*/
9690: /*@
9691:    MatFindZeroDiagonals - Finds all the rows of a matrix that have a zero or missing diagonal entry
9693: Collective on Mat
9695: Input Parameter:
9696: . mat - the matrix
9698: Output Parameter:
9699: . is - if any rows have zero diagonals this contains the list of them
9701: Level: developer
9703: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9704: @*/
9705: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
9706: {
9712: if (!mat->ops->findzerodiagonals) {
9713: Vec diag;
9714: const PetscScalar *a;
9715: PetscInt *rows;
9716: PetscInt rStart, rEnd, r, nrow = 0;
9718: MatCreateVecs(mat, &diag, NULL);
9719: MatGetDiagonal(mat, diag);
9720: MatGetOwnershipRange(mat, &rStart, &rEnd);
9721: VecGetArrayRead(diag, &a);
9722: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
9723: PetscMalloc1(nrow, &rows);
9724: nrow = 0;
9725: for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
9726: VecRestoreArrayRead(diag, &a);
9727: VecDestroy(&diag);
9728: ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
9729: } else {
9730: (*mat->ops->findzerodiagonals)(mat, is);
9731: }
9732: return 0;
9733: }
9735: /*@
9736: MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)
9738: Collective on Mat
9740: Input Parameter:
9741: . mat - the matrix
9743: Output Parameter:
9744: . is - contains the list of rows with off block diagonal entries
9746: Level: developer
9748: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
9749: @*/
9750: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
9751: {
9758: (*mat->ops->findoffblockdiagonalentries)(mat,is);
9759: return 0;
9760: }
9762: /*@C
9763: MatInvertBlockDiagonal - Inverts the block diagonal entries.
9765: Collective on Mat
9767: Input Parameters:
9768: . mat - the matrix
9770: Output Parameters:
9771: . values - the block inverses in column major order (FORTRAN-like)
9773: Note:
9774: The size of the blocks is determined by the block size of the matrix.
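   For example, a sketch of retrieving the inverted blocks; each of the m/bs local blocks occupies bs*bs consecutive entries in column major order:
.vb
      const PetscScalar *vals;
      PetscInt           bs,m;

      MatGetBlockSize(mat,&bs);
      MatGetLocalSize(mat,&m,NULL);
      MatInvertBlockDiagonal(mat,&vals);
      // block i occupies vals[i*bs*bs .. (i+1)*bs*bs-1], i = 0,...,m/bs-1
.ve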
9776: Fortran Note:
9777: This routine is not available from Fortran.
9779: Level: advanced
9781: .seealso: MatInvertBlockDiagonalMat()
9782: @*/
9783: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
9784: {
9789: (*mat->ops->invertblockdiagonal)(mat,values);
9790: return 0;
9791: }
9793: /*@C
9794: MatInvertVariableBlockDiagonal - Inverts the point block diagonal entries.
9796: Collective on Mat
9798: Input Parameters:
9799: + mat - the matrix
9800: . nblocks - the number of blocks
9801: - bsizes - the size of each block
9803: Output Parameters:
9804: . values - the block inverses in column major order (FORTRAN-like)
9806: Note:
9807: This routine is not available from Fortran.
9809: Level: advanced
9811: .seealso: MatInvertBlockDiagonal()
9812: @*/
9813: PetscErrorCode MatInvertVariableBlockDiagonal(Mat mat,PetscInt nblocks,const PetscInt *bsizes,PetscScalar *values)
9814: {
9819: (*mat->ops->invertvariableblockdiagonal)(mat,nblocks,bsizes,values);
9820: return 0;
9821: }
9823: /*@
9824:    MatInvertBlockDiagonalMat - Sets matrix C to be the inverted block diagonal of matrix A
9826: Collective on Mat
9828: Input Parameters:
9829: . A - the matrix
9831: Output Parameters:
9832: . C - matrix with inverted block diagonal of A. This matrix should be created and may have its type set.
9834:    Notes: The block size of A is used to determine the blocks on the diagonal of C
9836: Level: advanced
9838: .seealso: MatInvertBlockDiagonal()
9839: @*/
9840: PetscErrorCode MatInvertBlockDiagonalMat(Mat A,Mat C)
9841: {
9842: const PetscScalar *vals;
9843: PetscInt *dnnz;
9844: PetscInt M,N,m,n,rstart,rend,bs,i,j;
9846: MatInvertBlockDiagonal(A,&vals);
9847: MatGetBlockSize(A,&bs);
9848: MatGetSize(A,&M,&N);
9849: MatGetLocalSize(A,&m,&n);
9850: MatSetSizes(C,m,n,M,N);
9851: MatSetBlockSize(C,bs);
9852: PetscMalloc1(m/bs,&dnnz);
9853: for (j = 0; j < m/bs; j++) dnnz[j] = 1;
9854: MatXAIJSetPreallocation(C,bs,dnnz,NULL,NULL,NULL);
9855: PetscFree(dnnz);
9856: MatGetOwnershipRange(C,&rstart,&rend);
9857: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_FALSE);
9858: for (i = rstart/bs; i < rend/bs; i++) {
9859: MatSetValuesBlocked(C,1,&i,1,&i,&vals[(i-rstart/bs)*bs*bs],INSERT_VALUES);
9860: }
9861: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
9862: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
9863: MatSetOption(C,MAT_ROW_ORIENTED,PETSC_TRUE);
9864: return 0;
9865: }
9867: /*@C
9868: MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
9869: via MatTransposeColoringCreate().
9871: Collective on MatTransposeColoring
9873: Input Parameter:
9874: . c - coloring context
9876: Level: intermediate
9878: .seealso: MatTransposeColoringCreate()
9879: @*/
9880: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
9881: {
9882: MatTransposeColoring matcolor=*c;
9884: if (!matcolor) return 0;
9885: if (--((PetscObject)matcolor)->refct > 0) {matcolor = NULL; return 0;}
9887: PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
9888: PetscFree(matcolor->rows);
9889: PetscFree(matcolor->den2sp);
9890: PetscFree(matcolor->colorforcol);
9891: PetscFree(matcolor->columns);
9892: if (matcolor->brows>0) {
9893: PetscFree(matcolor->lstart);
9894: }
9895: PetscHeaderDestroy(c);
9896: return 0;
9897: }
9899: /*@C
9900:    MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
9901:    a MatTransposeColoring context has been created, computes a dense B^T by applying
9902:    the MatTransposeColoring to the sparse matrix B.
9904: Collective on MatTransposeColoring
9906:    Input Parameters:
9907: +  coloring - coloring context created with MatTransposeColoringCreate()
9908: .  B - sparse matrix B
9909: -  Btdense - symbolic dense matrix B^T
9911:    Output Parameter:
9912: .  Btdense - dense matrix B^T
9914: Level: advanced
9916: Notes:
9917: These are used internally for some implementations of MatRARt()
9919: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplyDenToSp()
9921: @*/
9922: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
9923: {
9929: (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
9930: return 0;
9931: }
9933: /*@C
9934:    MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
9935:    a MatTransposeColoring context has been created and a dense matrix Cden=A*Btdense
9936:    in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse matrix
9937:    Csp from Cden.
9939: Collective on MatTransposeColoring
9941: Input Parameters:
9942: +  matcoloring - coloring context created with MatTransposeColoringCreate()
9943: - Cden - matrix product of a sparse matrix and a dense matrix Btdense
9945: Output Parameter:
9946: . Csp - sparse matrix
9948: Level: advanced
9950: Notes:
9951: These are used internally for some implementations of MatRARt()
9953: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()
9955: @*/
9956: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
9957: {
9963: (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
9964: MatAssemblyBegin(Csp,MAT_FINAL_ASSEMBLY);
9965: MatAssemblyEnd(Csp,MAT_FINAL_ASSEMBLY);
9966: return 0;
9967: }
9969: /*@C
9970: MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.
9972: Collective on Mat
9974: Input Parameters:
9975: + mat - the matrix product C
9976: - iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()
9978: Output Parameter:
9979: . color - the new coloring context
9981: Level: intermediate
9983: .seealso: MatTransposeColoringDestroy(), MatTransColoringApplySpToDen(),
9984: MatTransColoringApplyDenToSp()
9985: @*/
9986: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
9987: {
9988: MatTransposeColoring c;
9989: MPI_Comm comm;
9991: PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
9992: PetscObjectGetComm((PetscObject)mat,&comm);
9993: PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);
9995: c->ctype = iscoloring->ctype;
9996: if (mat->ops->transposecoloringcreate) {
9997: (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
9998: } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for matrix type %s",((PetscObject)mat)->type_name);
10000: *color = c;
10001: PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10002: return 0;
10003: }
10005: /*@
10006:    MatGetNonzeroState - Returns a 64-bit integer representing the current state of nonzeros in the matrix. If the
10007:    matrix has had no new nonzero locations added since the previous call then the value will be the
10008:    same; otherwise it will be larger.
10010: Not Collective
10012: Input Parameter:
10013: . A - the matrix
10015: Output Parameter:
10016: . state - the current state
10018: Notes:
10019:    You can only compare states from two different calls to the SAME matrix; you cannot compare states obtained from
10020:    different matrices.
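   For example, a sketch of detecting whether new nonzero locations were introduced between two assemblies:
.vb
      PetscObjectState state0,state1;

      MatGetNonzeroState(A,&state0);
      // ... insert values and assemble A ...
      MatGetNonzeroState(A,&state1);
      if (state1 > state0) { /* the nonzero structure of A changed */ }
.ve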
10022: Level: intermediate
10024: @*/
10025: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10026: {
10028: *state = mat->nonzerostate;
10029: return 0;
10030: }
10032: /*@
10033: MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10034: matrices from each processor
10036: Collective
10038: Input Parameters:
10039: +  comm - the communicator the parallel matrix will live on
10040: .  seqmat - the input sequential matrix (one per process)
10041: . n - number of local columns (or PETSC_DECIDE)
10042: - reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10044: Output Parameter:
10045: . mpimat - the parallel matrix generated
10047: Level: advanced
10049: Notes:
10050: The number of columns of the matrix in EACH processor MUST be the same.
10052: @*/
10053: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10054: {
10058: PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10059: (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10060: PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10061: return 0;
10062: }
10064: /*@
10065: MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10066: ranks' ownership ranges.
10068: Collective on A
10070: Input Parameters:
10071: + A - the matrix to create subdomains from
10072: - N - requested number of subdomains
10074: Output Parameters:
10075: + n - number of subdomains resulting on this rank
10076: - iss - IS list with indices of subdomains on this rank
10078: Level: advanced
10080: Notes:
10081:    The number of subdomains must be smaller than the communicator size.
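   For example, a sketch requesting four coalesced subdomains and releasing them afterwards; what is done with the index sets (e.g. passing them to PCGASMSetSubdomains()) is up to the caller:
.vb
      PetscInt n,i;
      IS       *iss;

      MatSubdomainsCreateCoalesce(A,4,&n,&iss);
      // ... use iss[0],...,iss[n-1] ...
      for (i = 0; i < n; i++) ISDestroy(&iss[i]);
      PetscFree(iss);
.ve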
10082: @*/
10083: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10084: {
10085: MPI_Comm comm,subcomm;
10086: PetscMPIInt size,rank,color;
10087: PetscInt rstart,rend,k;
10089: PetscObjectGetComm((PetscObject)A,&comm);
10090: MPI_Comm_size(comm,&size);
10091: MPI_Comm_rank(comm,&rank);
10093: *n = 1;
10094: k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10095: color = rank/k;
10096: MPI_Comm_split(comm,color,rank,&subcomm);
10097: PetscMalloc1(1,iss);
10098: MatGetOwnershipRange(A,&rstart,&rend);
10099: ISCreateStride(subcomm,rend-rstart,rstart,1,iss[0]);
10100: MPI_Comm_free(&subcomm);
10101: return 0;
10102: }
10104: /*@
10105: MatGalerkin - Constructs the coarse grid problem via Galerkin projection.
10107:    If the interpolation and restriction operators are the same, this uses MatPtAP();
10108:    if they are not the same, it uses MatMatMatMult().
10110:    Once the coarse grid problem is constructed, it corrects for interpolation operators
10111: that are not of full rank, which can legitimately happen in the case of non-nested
10112: geometric multigrid.
10114: Input Parameters:
10115: + restrct - restriction operator
10116: . dA - fine grid matrix
10117: . interpolate - interpolation operator
10118: . reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
10119: - fill - expected fill, use PETSC_DEFAULT if you do not have a good estimate
10121: Output Parameters:
10122: . A - the Galerkin coarse matrix
10124: Options Database Key:
10125: . -pc_mg_galerkin <both,pmat,mat,none> - for what matrices the Galerkin process should be used
10127: Level: developer
10129: .seealso: MatPtAP(), MatMatMatMult()
10130: @*/
10131: PetscErrorCode MatGalerkin(Mat restrct, Mat dA, Mat interpolate, MatReuse reuse, PetscReal fill, Mat *A)
10132: {
10133: IS zerorows;
10134: Vec diag;
10137: /* Construct the coarse grid matrix */
10138: if (interpolate == restrct) {
10139: MatPtAP(dA,interpolate,reuse,fill,A);
10140: } else {
10141: MatMatMatMult(restrct,dA,interpolate,reuse,fill,A);
10142: }
10144: /* If the interpolation matrix is not of full rank, A will have zero rows.
10145: This can legitimately happen in the case of non-nested geometric multigrid.
10146: In that event, we set the rows of the matrix to the rows of the identity,
10147: ignoring the equations (as the RHS will also be zero). */
10149: MatFindZeroRows(*A, &zerorows);
10151: if (zerorows != NULL) { /* if there are any zero rows */
10152: MatCreateVecs(*A, &diag, NULL);
10153: MatGetDiagonal(*A, diag);
10154: VecISSet(diag, zerorows, 1.0);
10155: MatDiagonalSet(*A, diag, INSERT_VALUES);
10156: VecDestroy(&diag);
10157: ISDestroy(&zerorows);
10158: }
10159: return 0;
10160: }
10162: /*@C
10163:    MatSetOperation - Allows the user to set a matrix operation for any matrix type
10165: Logically Collective on Mat
10167: Input Parameters:
10168: + mat - the matrix
10169: . op - the name of the operation
10170: - f - the function that provides the operation
10172: Level: developer
10174: Usage:
10175: $ extern PetscErrorCode usermult(Mat,Vec,Vec);
10176: $ MatCreateXXX(comm,...&A);
10177: $ MatSetOperation(A,MATOP_MULT,(void(*)(void))usermult);
10179: Notes:
10180: See the file include/petscmat.h for a complete list of matrix
10181: operations, which all have the form MATOP_<OPERATION>, where
10182: <OPERATION> is the name (in all capital letters) of the
10183: user interface routine (e.g., MatMult() -> MATOP_MULT).
10185: All user-provided functions (except for MATOP_DESTROY) should have the same calling
10186: sequence as the usual matrix interface routines, since they
10187: are intended to be accessed via the usual matrix interface
10188: routines, e.g.,
10189: $ MatMult(Mat,Vec,Vec) -> usermult(Mat,Vec,Vec)
10191: In particular each function MUST return an error code of 0 on success and
10192: nonzero on failure.
10194: This routine is distinct from MatShellSetOperation() in that it can be called on any matrix type.
10196: .seealso: MatGetOperation(), MatCreateShell(), MatShellSetContext(), MatShellSetOperation()
10197: @*/
10198: PetscErrorCode MatSetOperation(Mat mat,MatOperation op,void (*f)(void))
10199: {
10201: if (op == MATOP_VIEW && !mat->ops->viewnative && f != (void (*)(void))(mat->ops->view)) {
10202: mat->ops->viewnative = mat->ops->view;
10203: }
10204: (((void(**)(void))mat->ops)[op]) = f;
10205: return 0;
10206: }
10208: /*@C
10209: MatGetOperation - Gets a matrix operation for any matrix type.
10211: Not Collective
10213: Input Parameters:
10214: + mat - the matrix
10215: - op - the name of the operation
10217: Output Parameter:
10218: . f - the function that provides the operation
10220: Level: developer
10222: Usage:
10223: $ PetscErrorCode (*usermult)(Mat,Vec,Vec);
10224: $ MatGetOperation(A,MATOP_MULT,(void(**)(void))&usermult);
10226: Notes:
10227: See the file include/petscmat.h for a complete list of matrix
10228: operations, which all have the form MATOP_<OPERATION>, where
10229: <OPERATION> is the name (in all capital letters) of the
10230: user interface routine (e.g., MatMult() -> MATOP_MULT).
10232: This routine is distinct from MatShellGetOperation() in that it can be called on any matrix type.
10234: .seealso: MatSetOperation(), MatCreateShell(), MatShellGetContext(), MatShellGetOperation()
10235: @*/
10236: PetscErrorCode MatGetOperation(Mat mat,MatOperation op,void(**f)(void))
10237: {
10239: *f = (((void (**)(void))mat->ops)[op]);
10240: return 0;
10241: }
10243: /*@
10244: MatHasOperation - Determines whether the given matrix supports the particular
10245: operation.
10247: Not Collective
10249: Input Parameters:
10250: + mat - the matrix
10251: - op - the operation, for example, MATOP_GET_DIAGONAL
10253: Output Parameter:
10254: . has - either PETSC_TRUE or PETSC_FALSE
10256: Level: advanced
10258: Notes:
10259: See the file include/petscmat.h for a complete list of matrix
10260: operations, which all have the form MATOP_<OPERATION>, where
10261: <OPERATION> is the name (in all capital letters) of the
10262: user-level routine. E.g., MatNorm() -> MATOP_NORM.
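   For example, a sketch guarding a call to MatGetDiagonal():
.vb
      PetscBool has;

      MatHasOperation(mat,MATOP_GET_DIAGONAL,&has);
      if (has) {
        MatGetDiagonal(mat,d);   // d is an already created compatible Vec
      }
.ve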
10264: .seealso: MatCreateShell()
10265: @*/
10266: PetscErrorCode MatHasOperation(Mat mat,MatOperation op,PetscBool *has)
10267: {
10270: if (mat->ops->hasoperation) {
10271: (*mat->ops->hasoperation)(mat,op,has);
10272: } else {
10273: if (((void**)mat->ops)[op]) *has = PETSC_TRUE;
10274: else {
10275: *has = PETSC_FALSE;
10276: if (op == MATOP_CREATE_SUBMATRIX) {
10277: PetscMPIInt size;
10279: MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
10280: if (size == 1) {
10281: MatHasOperation(mat,MATOP_CREATE_SUBMATRICES,has);
10282: }
10283: }
10284: }
10285: }
10286: return 0;
10287: }
10289: /*@
10290:    MatHasCongruentLayouts - Determines whether the row and column layouts
10291:    of the matrix are congruent
10293: Collective on mat
10295: Input Parameters:
10296: . mat - the matrix
10298: Output Parameter:
10299: . cong - either PETSC_TRUE or PETSC_FALSE
10301: Level: beginner
10305: .seealso: MatCreate(), MatSetSizes()
10306: @*/
10307: PetscErrorCode MatHasCongruentLayouts(Mat mat,PetscBool *cong)
10308: {
10312: if (!mat->rmap || !mat->cmap) {
10313: *cong = mat->rmap == mat->cmap ? PETSC_TRUE : PETSC_FALSE;
10314: return 0;
10315: }
10316: if (mat->congruentlayouts == PETSC_DECIDE) { /* first time we compare rows and cols layouts */
10317: PetscLayoutSetUp(mat->rmap);
10318: PetscLayoutSetUp(mat->cmap);
10319: PetscLayoutCompare(mat->rmap,mat->cmap,cong);
10320: if (*cong) mat->congruentlayouts = 1;
10321: else mat->congruentlayouts = 0;
10322: } else *cong = mat->congruentlayouts ? PETSC_TRUE : PETSC_FALSE;
10323: return 0;
10324: }
10326: PetscErrorCode MatSetInf(Mat A)
10327: {
10329: (*A->ops->setinf)(A);
10330: return 0;
10331: }