Actual source code: matrix.c

petsc-3.7.3 2016-08-01
  2: /*
  3:    This is where the abstract matrix operations are defined
  4: */

  6: #include <petsc/private/matimpl.h>        /*I "petscmat.h" I*/
  7: #include <petsc/private/vecimpl.h>
  8: #include <petsc/private/isimpl.h>

 10: /* Logging support */
 11: PetscClassId MAT_CLASSID;
 12: PetscClassId MAT_COLORING_CLASSID;
 13: PetscClassId MAT_FDCOLORING_CLASSID;
 14: PetscClassId MAT_TRANSPOSECOLORING_CLASSID;

 16: PetscLogEvent MAT_Mult, MAT_Mults, MAT_MultConstrained, MAT_MultAdd, MAT_MultTranspose;
 17: PetscLogEvent MAT_MultTransposeConstrained, MAT_MultTransposeAdd, MAT_Solve, MAT_Solves, MAT_SolveAdd, MAT_SolveTranspose, MAT_MatSolve;
 18: PetscLogEvent MAT_SolveTransposeAdd, MAT_SOR, MAT_ForwardSolve, MAT_BackwardSolve, MAT_LUFactor, MAT_LUFactorSymbolic;
 19: PetscLogEvent MAT_LUFactorNumeric, MAT_CholeskyFactor, MAT_CholeskyFactorSymbolic, MAT_CholeskyFactorNumeric, MAT_ILUFactor;
 20: PetscLogEvent MAT_ILUFactorSymbolic, MAT_ICCFactorSymbolic, MAT_Copy, MAT_Convert, MAT_Scale, MAT_AssemblyBegin;
 21: PetscLogEvent MAT_AssemblyEnd, MAT_SetValues, MAT_GetValues, MAT_GetRow, MAT_GetRowIJ, MAT_GetSubMatrices, MAT_GetOrdering, MAT_RedundantMat, MAT_GetSeqNonzeroStructure;
 22: PetscLogEvent MAT_IncreaseOverlap, MAT_Partitioning, MAT_Coarsen, MAT_ZeroEntries, MAT_Load, MAT_View, MAT_AXPY, MAT_FDColoringCreate;
 23: PetscLogEvent MAT_FDColoringSetUp, MAT_FDColoringApply,MAT_Transpose,MAT_FDColoringFunction, MAT_GetSubMatrix;
 24: PetscLogEvent MAT_TransposeColoringCreate;
 25: PetscLogEvent MAT_MatMult, MAT_MatMultSymbolic, MAT_MatMultNumeric;
 26: PetscLogEvent MAT_PtAP, MAT_PtAPSymbolic, MAT_PtAPNumeric,MAT_RARt, MAT_RARtSymbolic, MAT_RARtNumeric;
 27: PetscLogEvent MAT_MatTransposeMult, MAT_MatTransposeMultSymbolic, MAT_MatTransposeMultNumeric;
 28: PetscLogEvent MAT_TransposeMatMult, MAT_TransposeMatMultSymbolic, MAT_TransposeMatMultNumeric;
 29: PetscLogEvent MAT_MatMatMult, MAT_MatMatMultSymbolic, MAT_MatMatMultNumeric;
 30: PetscLogEvent MAT_MultHermitianTranspose,MAT_MultHermitianTransposeAdd;
 31: PetscLogEvent MAT_Getsymtranspose, MAT_Getsymtransreduced, MAT_Transpose_SeqAIJ, MAT_GetBrowsOfAcols;
 32: PetscLogEvent MAT_GetBrowsOfAocols, MAT_Getlocalmat, MAT_Getlocalmatcondensed, MAT_Seqstompi, MAT_Seqstompinum, MAT_Seqstompisym;
 33: PetscLogEvent MAT_Applypapt, MAT_Applypapt_numeric, MAT_Applypapt_symbolic, MAT_GetSequentialNonzeroStructure;
 34: PetscLogEvent MAT_GetMultiProcBlock;
 35: PetscLogEvent MAT_CUSPCopyToGPU, MAT_CUSPARSECopyToGPU, MAT_SetValuesBatch, MAT_SetValuesBatchI, MAT_SetValuesBatchII, MAT_SetValuesBatchIII, MAT_SetValuesBatchIV;
 36: PetscLogEvent MAT_ViennaCLCopyToGPU;
 37: PetscLogEvent MAT_Merge,MAT_Residual;
 38: PetscLogEvent Mat_Coloring_Apply,Mat_Coloring_Comm,Mat_Coloring_Local,Mat_Coloring_ISCreate,Mat_Coloring_SetUp,Mat_Coloring_Weights;

 40: const char *const MatFactorTypes[] = {"NONE","LU","CHOLESKY","ILU","ICC","ILUDT","MatFactorType","MAT_FACTOR_",0};

 44: /*@
 45:    MatSetRandom - Sets all components of a matrix to random numbers. For sparse matrices that have been preallocated, it randomly selects appropriate locations.

 47:    Logically Collective on Mat

 49:    Input Parameters:
 50: +  x  - the matrix
 51: -  rctx - the random number context, formed by PetscRandomCreate(), or NULL,
 52:           in which case one is created internally.

 54:    Output Parameter:
 55: .  x  - the matrix

 57:    Example of Usage:
 58: .vb
 59:      PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
 60:      MatSetRandom(x,rctx);
 61:      PetscRandomDestroy(&rctx);
 62: .ve

 64:    Level: intermediate

 66:    Concepts: matrix^setting to random
 67:    Concepts: random^matrix

 69: .seealso: MatZeroEntries(), MatSetValues(), PetscRandomCreate(), PetscRandomDestroy()
 70: @*/
 71: PetscErrorCode MatSetRandom(Mat x,PetscRandom rctx)
 72: {
 74:   PetscRandom    randObj = NULL;


 81:   if (!rctx) {
 82:     MPI_Comm comm;
 83:     PetscObjectGetComm((PetscObject)x,&comm);
 84:     PetscRandomCreate(comm,&randObj);
 85:     PetscRandomSetFromOptions(randObj);
 86:     rctx = randObj;
 87:   }

 89:   PetscLogEventBegin(VEC_SetRandom,x,rctx,0,0);
 90:   (*x->ops->setrandom)(x,rctx);
 91:   PetscLogEventEnd(VEC_SetRandom,x,rctx,0,0);

 93:   x->assembled = PETSC_TRUE;
 94:   PetscRandomDestroy(&randObj);
 95:   return(0);
 96: }


101: /*@
102:       MatFindNonzeroRows - Locate all rows that are not completely zero in the matrix

104:   Input Parameter:
105: .    mat  - the matrix

107:   Output Parameter:
108: .    keptrows - the rows that are not completely zero

110:   Level: intermediate

112:  @*/
113: PetscErrorCode MatFindNonzeroRows(Mat mat,IS *keptrows)
114: {

119:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
120:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
121:   if (!mat->ops->findnonzerorows) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not coded for this matrix type");
122:   (*mat->ops->findnonzerorows)(mat,keptrows);
123:   return(0);
124: }
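
   A minimal usage sketch, assuming an assembled matrix A (error checking omitted):

      IS       keptrows;
      PetscInt nkept;

      MatFindNonzeroRows(A,&keptrows);
      if (keptrows) {                        /* may be NULL if every row already has a nonzero (implementation dependent) */
        ISGetSize(keptrows,&nkept);
        PetscPrintf(PetscObjectComm((PetscObject)A),"rows with at least one nonzero: %D\n",nkept);
        ISDestroy(&keptrows);
      }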

128: /*@
129:    MatGetDiagonalBlock - Returns the part of the matrix associated with the on-process coupling

131:    Not Collective

133:    Input Parameters:
134: .   A - the matrix

136:    Output Parameter:
137: .   a - the diagonal part (which is a SEQUENTIAL matrix)

139:    Notes: see the manual page for MatCreateAIJ() for more information on the "diagonal part" of the matrix.
140:           Use caution, as the reference count on the returned matrix is not incremented and it is used as
141:           part of the containing MPI Mat's normal operation.

143:    Level: advanced

145: @*/
146: PetscErrorCode MatGetDiagonalBlock(Mat A,Mat *a)
147: {
148:   PetscErrorCode ierr,(*f)(Mat,Mat*);
149:   PetscMPIInt    size;

155:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
156:   MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
157:   PetscObjectQueryFunction((PetscObject)A,"MatGetDiagonalBlock_C",&f);
158:   if (f) {
159:     (*f)(A,a);
160:     return(0);
161:   } else if (size == 1) {
162:     *a = A;
163:   } else {
164:     MatType mattype;
165:     MatGetType(A,&mattype);
166:     SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix type %s does not support getting diagonal block",mattype);
167:   }
168:   return(0);
169: }
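
   An illustrative sketch for an MPI matrix A where each process inspects its own diagonal block (error checking omitted):

      Mat Ad;

      MatGetDiagonalBlock(A,&Ad);            /* Ad is a SEQUENTIAL matrix */
      MatView(Ad,PETSC_VIEWER_STDOUT_SELF);  /* each process prints its own block */
      /* do NOT call MatDestroy(&Ad); the reference count was not incremented */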

173: /*@
174:    MatGetTrace - Gets the trace of a matrix, i.e. the sum of its diagonal entries.

176:    Collective on Mat

178:    Input Parameter:
179: .  mat - the matrix

181:    Output Parameter:
182: .   trace - the sum of the diagonal entries

184:    Level: advanced

186: @*/
187: PetscErrorCode MatGetTrace(Mat mat,PetscScalar *trace)
188: {
190:   Vec            diag;

193:   MatCreateVecs(mat,&diag,NULL);
194:   MatGetDiagonal(mat,diag);
195:   VecSum(diag,trace);
196:   VecDestroy(&diag);
197:   return(0);
198: }
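
   A short usage sketch, assuming an assembled square matrix A (error checking omitted):

      PetscScalar tr;

      MatGetTrace(A,&tr);
      PetscPrintf(PetscObjectComm((PetscObject)A),"trace(A) = %g\n",(double)PetscRealPart(tr));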

202: /*@
203:    MatRealPart - Zeros out the imaginary part of the matrix

205:    Logically Collective on Mat

207:    Input Parameter:
208: .  mat - the matrix

210:    Level: advanced


213: .seealso: MatImaginaryPart()
214: @*/
215: PetscErrorCode MatRealPart(Mat mat)
216: {

222:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
223:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
224:   if (!mat->ops->realpart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
225:   MatCheckPreallocated(mat,1);
226:   (*mat->ops->realpart)(mat);
227: #if defined(PETSC_HAVE_CUSP)
228:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
229:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
230:   }
231: #elif defined(PETSC_HAVE_VIENNACL)
232:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
233:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
234:   }
235: #elif defined(PETSC_HAVE_VECCUDA)
236:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
237:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
238:   }
239: #endif
240:   return(0);
241: }

245: /*@C
246:    MatGetGhosts - Get the global index of all ghost nodes defined by the sparse matrix

248:    Collective on Mat

250:    Input Parameter:
251: .  mat - the matrix

253:    Output Parameters:
254: +   nghosts - number of ghosts (note for BAIJ matrices there is one ghost for each block)
255: -   ghosts - the global indices of the ghost points

257:    Notes: the nghosts and ghosts are suitable to pass into VecCreateGhost()

259:    Level: advanced

261: @*/
262: PetscErrorCode MatGetGhosts(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
263: {

269:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
270:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
271:   if (!mat->ops->getghosts) {
272:     if (nghosts) *nghosts = 0;
273:     if (ghosts) *ghosts = 0;
274:   } else {
275:     (*mat->ops->getghosts)(mat,nghosts,ghosts);
276:   }
277:   return(0);
278: }
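
   A sketch of the intended use together with VecCreateGhost(), assuming an assembled MPIAIJ matrix A
   (error checking omitted; for BAIJ matrices the ghosts are per block):

      PetscInt       nghosts,nlocal;
      const PetscInt *ghosts;
      Vec            x;

      MatGetGhosts(A,&nghosts,&ghosts);
      MatGetLocalSize(A,NULL,&nlocal);       /* local column size = local size of x in y = A*x */
      VecCreateGhost(PetscObjectComm((PetscObject)A),nlocal,PETSC_DETERMINE,nghosts,ghosts,&x);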


283: /*@
284:    MatImaginaryPart - Moves the imaginary part of the matrix to the real part and zeros the imaginary part

286:    Logically Collective on Mat

288:    Input Parameter:
289: .  mat - the matrix

291:    Level: advanced


294: .seealso: MatRealPart()
295: @*/
296: PetscErrorCode MatImaginaryPart(Mat mat)
297: {

303:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
304:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
305:   if (!mat->ops->imaginarypart) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
306:   MatCheckPreallocated(mat,1);
307:   (*mat->ops->imaginarypart)(mat);
308: #if defined(PETSC_HAVE_CUSP)
309:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
310:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
311:   }
312: #elif defined(PETSC_HAVE_VIENNACL)
313:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
314:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
315:   }
316: #elif defined(PETSC_HAVE_VECCUDA)
317:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
318:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
319:   }
320: #endif
321:   return(0);
322: }
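
   A sketch showing how MatRealPart() and MatImaginaryPart() can be combined to split a complex matrix A
   into its two parts; both calls overwrite their argument, so work on duplicates (error checking omitted):

      Mat Are,Aim;

      MatDuplicate(A,MAT_COPY_VALUES,&Are);
      MatDuplicate(A,MAT_COPY_VALUES,&Aim);
      MatRealPart(Are);                      /* Are now holds Re(A) */
      MatImaginaryPart(Aim);                 /* Aim now holds Im(A), stored in its real part */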

326: /*@
327:    MatMissingDiagonal - Determine if sparse matrix is missing a diagonal entry (or block entry for BAIJ matrices)

329:    Collective on Mat

331:    Input Parameter:
332: .  mat - the matrix

334:    Output Parameters:
335: +  missing - is any diagonal missing
336: -  dd - first diagonal entry that is missing (optional)

338:    Level: advanced


341: .seealso: MatRealPart()
342: @*/
343: PetscErrorCode MatMissingDiagonal(Mat mat,PetscBool *missing,PetscInt *dd)
344: {

350:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
351:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
352:   if (!mat->ops->missingdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
353:   (*mat->ops->missingdiagonal)(mat,missing,dd);
354:   return(0);
355: }
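
   A minimal sketch, e.g. to check a matrix before a factorization that requires diagonal entries (error checking omitted):

      PetscBool missing;
      PetscInt  dd;

      MatMissingDiagonal(A,&missing,&dd);
      if (missing) {
        PetscPrintf(PETSC_COMM_SELF,"first missing diagonal entry: %D\n",dd);
      }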

359: /*@C
360:    MatGetRow - Gets a row of a matrix.  You MUST call MatRestoreRow()
361:    for each row that you get to ensure that your application does
362:    not bleed memory.

364:    Not Collective

366:    Input Parameters:
367: +  mat - the matrix
368: -  row - the row to get

370:    Output Parameters:
371: +  ncols -  if not NULL, the number of nonzeros in the row
372: .  cols - if not NULL, the column numbers
373: -  vals - if not NULL, the values

375:    Notes:
376:    This routine is provided for people who need to have direct access
377:    to the structure of a matrix.  We hope that we provide enough
378:    high-level matrix routines that few users will need it.

380:    MatGetRow() always returns 0-based column indices, regardless of
381:    whether the internal representation is 0-based (default) or 1-based.

383:    For better efficiency, set cols and/or vals to NULL if you do
384:    not wish to extract these quantities.

386:    The user can only examine the values extracted with MatGetRow();
387:    the values cannot be altered.  To change the matrix entries, one
388:    must use MatSetValues().

390:    You can only have one call to MatGetRow() outstanding for a particular
391:    matrix at a time, per processor. MatGetRow() can only obtain rows
392:    associated with the given processor, it cannot get rows from the
393:    other processors; for that we suggest using MatGetSubMatrices(), then
394:    MatGetRow() on the submatrix. The row index passed to MatGetRow()
395:    is the global row number.

397:    Fortran Notes:
398:    The calling sequence from Fortran is
399: .vb
400:    MatGetRow(matrix,row,ncols,cols,values,ierr)
401:          Mat     matrix (input)
402:          integer row    (input)
403:          integer ncols  (output)
404:          integer cols(maxcols) (output)
405:          double precision (or double complex) values(maxcols) output
406: .ve
407:    where maxcols >= maximum nonzeros in any row of the matrix.


410:    Caution:
411:    Do not try to change the contents of the output arrays (cols and vals).
412:    In some cases, this may corrupt the matrix.

414:    Level: advanced

416:    Concepts: matrices^row access

418: .seealso: MatRestoreRow(), MatSetValues(), MatGetValues(), MatGetSubMatrices(), MatGetDiagonal()
419: @*/
420: PetscErrorCode MatGetRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
421: {
423:   PetscInt       incols;

428:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
429:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
430:   if (!mat->ops->getrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
431:   MatCheckPreallocated(mat,1);
432:   PetscLogEventBegin(MAT_GetRow,mat,0,0,0);
433:   (*mat->ops->getrow)(mat,row,&incols,(PetscInt**)cols,(PetscScalar**)vals);
434:   if (ncols) *ncols = incols;
435:   PetscLogEventEnd(MAT_GetRow,mat,0,0,0);
436:   return(0);
437: }
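
   A typical read-only traversal of the locally owned rows, assuming an assembled matrix A (error checking omitted):

      PetscInt          row,rstart,rend,ncols;
      const PetscInt    *cols;
      const PetscScalar *vals;

      MatGetOwnershipRange(A,&rstart,&rend);
      for (row=rstart; row<rend; row++) {
        MatGetRow(A,row,&ncols,&cols,&vals);
        /* examine, but do not modify, cols[0..ncols-1] and vals[0..ncols-1] */
        MatRestoreRow(A,row,&ncols,&cols,&vals);
      }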

441: /*@
442:    MatConjugate - replaces the matrix values with their complex conjugates

444:    Logically Collective on Mat

446:    Input Parameter:
447: .  mat - the matrix

449:    Level: advanced

451: .seealso:  VecConjugate()
452: @*/
453: PetscErrorCode MatConjugate(Mat mat)
454: {
455: #if defined(PETSC_USE_COMPLEX)

460:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
461:   if (!mat->ops->conjugate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not provided for this matrix format, send email to petsc-maint@mcs.anl.gov");
462:   (*mat->ops->conjugate)(mat);
463: #if defined(PETSC_HAVE_CUSP)
464:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
465:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
466:   }
467: #elif defined(PETSC_HAVE_VIENNACL)
468:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
469:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
470:   }
471: #elif defined(PETSC_HAVE_VECCUDA)
472:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
473:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
474:   }
475: #endif
476:   return(0);
477: #else
478:   return 0;
479: #endif
480: }
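
   One way to form the Hermitian transpose A^H explicitly, shown as a sketch (error checking omitted):

      Mat Ah;

      MatTranspose(A,MAT_INITIAL_MATRIX,&Ah);
      MatConjugate(Ah);                      /* Ah = conj(A^T) = A^H */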

484: /*@C
485:    MatRestoreRow - Frees any temporary space allocated by MatGetRow().

487:    Not Collective

489:    Input Parameters:
490: +  mat - the matrix
491: .  row - the row to get
492: .  ncols, cols - the number of nonzeros and their columns
493: -  vals - if nonzero the column values

495:    Notes:
496:    This routine should be called after you have finished examining the entries.

498:    This routine zeros out ncols, cols, and vals. This is to prevent accidental
499:    use of the arrays after they have been restored. If you pass NULL, it will
500:    not zero the pointers.  Use of cols or vals after MatRestoreRow() is invalid.

502:    Fortran Notes:
503:    The calling sequence from Fortran is
504: .vb
505:    MatRestoreRow(matrix,row,ncols,cols,values,ierr)
506:       Mat     matrix (input)
507:       integer row    (input)
508:       integer ncols  (output)
509:       integer cols(maxcols) (output)
510:       double precision (or double complex) values(maxcols) output
511: .ve
512:    Where maxcols >= maximum nonzeros in any row of the matrix.

514:    In Fortran MatRestoreRow() MUST be called after MatGetRow()
515:    before another call to MatGetRow() can be made.

517:    Level: advanced

519: .seealso:  MatGetRow()
520: @*/
521: PetscErrorCode MatRestoreRow(Mat mat,PetscInt row,PetscInt *ncols,const PetscInt *cols[],const PetscScalar *vals[])
522: {

528:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
529:   if (!mat->ops->restorerow) return(0);
530:   (*mat->ops->restorerow)(mat,row,ncols,(PetscInt **)cols,(PetscScalar **)vals);
531:   if (ncols) *ncols = 0;
532:   if (cols)  *cols = NULL;
533:   if (vals)  *vals = NULL;
534:   return(0);
535: }

539: /*@
540:    MatGetRowUpperTriangular - Sets a flag to enable calls to MatGetRow() for matrix in MATSBAIJ format.
541:    You should call MatRestoreRowUpperTriangular() after calling MatGetRow/MatRestoreRow() to disable the flag.

543:    Not Collective

545:    Input Parameter:
546: .  mat - the matrix

548:    Notes:
549:    The flag is to ensure that users are aware that MatGetRow() only provides the upper triangular part of the row for matrices in MATSBAIJ format.

551:    Level: advanced

553:    Concepts: matrices^row access

555: .seealso: MatRestoreRowUpperTriangular()
556: @*/
557: PetscErrorCode MatGetRowUpperTriangular(Mat mat)
558: {

564:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
565:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
566:   if (!mat->ops->getrowuppertriangular) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
567:   MatCheckPreallocated(mat,1);
568:   (*mat->ops->getrowuppertriangular)(mat);
569:   return(0);
570: }

574: /*@
575:    MatRestoreRowUpperTriangular - Disable calls to MatGetRow() for matrix in MATSBAIJ format.

577:    Not Collective

579:    Input Parameter:
580: .  mat - the matrix

582:    Notes:
583:    This routine should be called after you have finished MatGetRow/MatRestoreRow().


586:    Level: advanced

588: .seealso:  MatGetRowUpperTriangular()
589: @*/
590: PetscErrorCode MatRestoreRowUpperTriangular(Mat mat)
591: {

596:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
597:   if (!mat->ops->restorerowuppertriangular) return(0);
598:   (*mat->ops->restorerowuppertriangular)(mat);
599:   return(0);
600: }
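
   A sketch of how the pair brackets row access for a MATSBAIJ matrix A, assuming row is a locally owned
   row index (only the upper triangular part of each row is returned; error checking omitted):

      PetscInt          ncols;
      const PetscInt    *cols;
      const PetscScalar *vals;

      MatGetRowUpperTriangular(A);
      MatGetRow(A,row,&ncols,&cols,&vals);
      /* ... examine the upper triangular part of the row ... */
      MatRestoreRow(A,row,&ncols,&cols,&vals);
      MatRestoreRowUpperTriangular(A);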

604: /*@C
605:    MatSetOptionsPrefix - Sets the prefix used for searching for all
606:    Mat options in the database.

608:    Logically Collective on Mat

610:    Input Parameters:
611: +  A - the Mat context
612: -  prefix - the prefix to prepend to all option names

614:    Notes:
615:    A hyphen (-) must NOT be given at the beginning of the prefix name.
616:    The first character of all runtime options is AUTOMATICALLY the hyphen.

618:    Level: advanced

620: .keywords: Mat, set, options, prefix, database

622: .seealso: MatSetFromOptions()
623: @*/
624: PetscErrorCode MatSetOptionsPrefix(Mat A,const char prefix[])
625: {

630:   PetscObjectSetOptionsPrefix((PetscObject)A,prefix);
631:   return(0);
632: }
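
   A sketch of how a prefix separates the options of a particular matrix; the prefix string "sys1_" is only an example (error checking omitted):

      MatSetOptionsPrefix(A,"sys1_");
      MatSetFromOptions(A);
      /* options such as -sys1_mat_type baij or -sys1_mat_view now apply only to A */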

636: /*@C
637:    MatAppendOptionsPrefix - Appends to the prefix used for searching for all
638:    Mat options in the database.

640:    Logically Collective on Mat

642:    Input Parameters:
643: +  A - the Mat context
644: -  prefix - the prefix to prepend to all option names

646:    Notes:
647:    A hyphen (-) must NOT be given at the beginning of the prefix name.
648:    The first character of all runtime options is AUTOMATICALLY the hyphen.

650:    Level: advanced

652: .keywords: Mat, append, options, prefix, database

654: .seealso: MatGetOptionsPrefix()
655: @*/
656: PetscErrorCode MatAppendOptionsPrefix(Mat A,const char prefix[])
657: {

662:   PetscObjectAppendOptionsPrefix((PetscObject)A,prefix);
663:   return(0);
664: }

668: /*@C
669:    MatGetOptionsPrefix - Gets the prefix used for searching for all
670:    Mat options in the database.

672:    Not Collective

674:    Input Parameter:
675: .  A - the Mat context

677:    Output Parameter:
678: .  prefix - pointer to the prefix string used

680:    Notes: On the Fortran side, the user should pass in a string 'prefix' of
681:    sufficient length to hold the prefix.

683:    Level: advanced

685: .keywords: Mat, get, options, prefix, database

687: .seealso: MatAppendOptionsPrefix()
688: @*/
689: PetscErrorCode MatGetOptionsPrefix(Mat A,const char *prefix[])
690: {

695:   PetscObjectGetOptionsPrefix((PetscObject)A,prefix);
696:   return(0);
697: }

701: /*@
702:    MatSetUp - Sets up the internal matrix data structures for later use.

704:    Collective on Mat

706:    Input Parameter:
707: .  A - the Mat context

709:    Notes:
710:    If the user has not set preallocation for this matrix then a default preallocation that is likely to be inefficient is used.

712:    If a suitable preallocation routine is used, this function does not need to be called.

714:    See the Performance chapter of the PETSc users manual for how to preallocate matrices

716:    Level: beginner

718: .keywords: Mat, setup

720: .seealso: MatCreate(), MatDestroy()
721: @*/
722: PetscErrorCode MatSetUp(Mat A)
723: {
724:   PetscMPIInt    size;

729:   if (!((PetscObject)A)->type_name) {
730:     MPI_Comm_size(PetscObjectComm((PetscObject)A), &size);
731:     if (size == 1) {
732:       MatSetType(A, MATSEQAIJ);
733:     } else {
734:       MatSetType(A, MATMPIAIJ);
735:     }
736:   }
737:   if (!A->preallocated && A->ops->setup) {
738:     PetscInfo(A,"Warning not preallocating matrix storage\n");
739:     (*A->ops->setup)(A);
740:   }
741:   A->preallocated = PETSC_TRUE;
742:   return(0);
743: }
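
   A typical creation sequence when no preallocation routine is called, assuming a global size n
   (error checking omitted; for large problems prefer a MatXXXXSetPreallocation() routine):

      Mat A;

      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
      MatSetFromOptions(A);
      MatSetUp(A);                           /* default, possibly inefficient, preallocation */
      /* ... MatSetValues() calls ... */
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);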

745: #if defined(PETSC_HAVE_SAWS)
746: #include <petscviewersaws.h>
747: #endif
750: /*@C
751:    MatView - Visualizes a matrix object.

753:    Collective on Mat

755:    Input Parameters:
756: +  mat - the matrix
757: -  viewer - visualization context

759:   Notes:
760:   The available visualization contexts include
761: +    PETSC_VIEWER_STDOUT_SELF - for sequential matrices
762: .    PETSC_VIEWER_STDOUT_WORLD - for parallel matrices created on PETSC_COMM_WORLD
763: .    PETSC_VIEWER_STDOUT_(comm) - for matrices created on MPI communicator comm
764: -     PETSC_VIEWER_DRAW_WORLD - graphical display of nonzero structure

766:    The user can open alternative visualization contexts with
767: +    PetscViewerASCIIOpen() - Outputs matrix to a specified file
768: .    PetscViewerBinaryOpen() - Outputs matrix in binary to a
769:          specified file; corresponding input uses MatLoad()
770: .    PetscViewerDrawOpen() - Outputs nonzero matrix structure to
771:          an X window display
772: -    PetscViewerSocketOpen() - Outputs matrix to Socket viewer.
773:          Currently only the sequential dense and AIJ
774:          matrix types support the Socket viewer.

776:    The user can call PetscViewerPushFormat() to specify the output
777:    format of ASCII printed objects (when using PETSC_VIEWER_STDOUT_SELF,
778:    PETSC_VIEWER_STDOUT_WORLD and PetscViewerASCIIOpen).  Available formats include
779: +    PETSC_VIEWER_DEFAULT - default, prints matrix contents
780: .    PETSC_VIEWER_ASCII_MATLAB - prints matrix contents in Matlab format
781: .    PETSC_VIEWER_ASCII_DENSE - prints entire matrix including zeros
782: .    PETSC_VIEWER_ASCII_COMMON - prints matrix contents, using a sparse
783:          format common among all matrix types
784: .    PETSC_VIEWER_ASCII_IMPL - prints matrix contents, using an implementation-specific
785:          format (which is in many cases the same as the default)
786: .    PETSC_VIEWER_ASCII_INFO - prints basic information about the matrix
787:          size and structure (not the matrix entries)
788: .    PETSC_VIEWER_ASCII_INFO_DETAIL - prints more detailed information about
789:          the matrix structure

791:    Options Database Keys:
792: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
793: .  -mat_view ::ascii_info_detail - Prints more detailed info
794: .  -mat_view - Prints matrix in ASCII format
795: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
796: .  -mat_view draw - Draws the nonzero structure of the matrix, using MatView() and PetscDrawOpenX().
797: .  -display <name> - Sets display name (default is host)
798: .  -draw_pause <sec> - Sets number of seconds to pause after display
799: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (see Users-Manual: Chapter 11 Using MATLAB with PETSc for details)
800: .  -viewer_socket_machine <machine> -
801: .  -viewer_socket_port <port> -
802: .  -mat_view binary - save matrix to file in binary format
803: -  -viewer_binary_filename <name> -

804:    Level: beginner

806:    Notes: see the manual page for MatLoad() for the exact format of the binary file when the binary
807:       viewer is used.

809:       See share/petsc/matlab/PetscBinaryRead.m for a Matlab code that can read in the binary file when the binary
810:       viewer is used.

812:       One can use '-mat_view draw -draw_pause -1' to pause the graphical display of matrix nonzero structure.
813:       And then use the following mouse functions:
814:           left mouse: zoom in
815:           middle mouse: zoom out
816:           right mouse: continue with the simulation

818:    Concepts: matrices^viewing
819:    Concepts: matrices^plotting
820:    Concepts: matrices^printing

822: .seealso: PetscViewerPushFormat(), PetscViewerASCIIOpen(), PetscViewerDrawOpen(),
823:           PetscViewerSocketOpen(), PetscViewerBinaryOpen(), MatLoad()
824: @*/
825: PetscErrorCode MatView(Mat mat,PetscViewer viewer)
826: {
827:   PetscErrorCode    ierr;
828:   PetscInt          rows,cols,rbs,cbs;
829:   PetscBool         iascii,ibinary;
830:   PetscViewerFormat format;
831: #if defined(PETSC_HAVE_SAWS)
832:   PetscBool         issaws;
833: #endif

838:   if (!viewer) {
839:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)mat),&viewer);
840:   }
843:   MatCheckPreallocated(mat,1);
844:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&ibinary);
845:   if (ibinary) {
846:     PetscBool mpiio;
847:     PetscViewerBinaryGetUseMPIIO(viewer,&mpiio);
848:     if (mpiio) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"PETSc matrix viewers do not support using MPI-IO, turn off that flag");
849:   }

851:   PetscLogEventBegin(MAT_View,mat,viewer,0,0);
852:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
853:   PetscViewerGetFormat(viewer,&format);
854:   if ((!iascii || (format != PETSC_VIEWER_ASCII_INFO && format != PETSC_VIEWER_ASCII_INFO_DETAIL)) && mat->factortype) {
855:     SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"No viewers for factored matrix except ASCII info or info_detail");
856:   }

858: #if defined(PETSC_HAVE_SAWS)
859:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSAWS,&issaws);
860: #endif
861:   if (iascii) {
862:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
863:     PetscObjectPrintClassNamePrefixType((PetscObject)mat,viewer);
864:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
865:       PetscViewerASCIIPushTab(viewer);
866:       MatGetSize(mat,&rows,&cols);
867:       MatGetBlockSizes(mat,&rbs,&cbs);
868:       if (rbs != 1 || cbs != 1) {
869:         if (rbs != cbs) {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, rbs=%D, cbs = %D\n",rows,cols,rbs,cbs);}
870:         else            {PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D, bs=%D\n",rows,cols,rbs);}
871:       } else {
872:         PetscViewerASCIIPrintf(viewer,"rows=%D, cols=%D\n",rows,cols);
873:       }
874:       if (mat->factortype) {
875:         const MatSolverPackage solver;
876:         MatFactorGetSolverPackage(mat,&solver);
877:         PetscViewerASCIIPrintf(viewer,"package used to perform factorization: %s\n",solver);
878:       }
879:       if (mat->ops->getinfo) {
880:         MatInfo info;
881:         MatGetInfo(mat,MAT_GLOBAL_SUM,&info);
882:         PetscViewerASCIIPrintf(viewer,"total: nonzeros=%.f, allocated nonzeros=%.f\n",info.nz_used,info.nz_allocated);
883:         PetscViewerASCIIPrintf(viewer,"total number of mallocs used during MatSetValues calls =%D\n",(PetscInt)info.mallocs);
884:       }
885:       if (mat->nullsp) {PetscViewerASCIIPrintf(viewer,"  has attached null space\n");}
886:       if (mat->nearnullsp) {PetscViewerASCIIPrintf(viewer,"  has attached near null space\n");}
887:     }
888: #if defined(PETSC_HAVE_SAWS)
889:   } else if (issaws) {
890:     PetscMPIInt rank;

892:     PetscObjectName((PetscObject)mat);
893:     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
894:     if (!((PetscObject)mat)->amsmem && !rank) {
895:       PetscObjectViewSAWs((PetscObject)mat,viewer);
896:     }
897: #endif
898:   }
899:   if (mat->ops->view) {
900:     PetscViewerASCIIPushTab(viewer);
901:     (*mat->ops->view)(mat,viewer);
902:     PetscViewerASCIIPopTab(viewer);
903:   }
904:   if (iascii) {
905:     if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ORDER,"Must call MatAssemblyBegin/End() before viewing matrix");
906:     PetscViewerGetFormat(viewer,&format);
907:     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
908:       PetscViewerASCIIPopTab(viewer);
909:     }
910:   }
911:   PetscLogEventEnd(MAT_View,mat,viewer,0,0);
912:   return(0);
913: }
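
   A sketch combining a pushed format with the default output, assuming an assembled matrix A (error checking omitted):

      PetscViewer viewer = PETSC_VIEWER_STDOUT_WORLD;

      PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO);
      MatView(A,viewer);                     /* prints size and nonzero information only */
      PetscViewerPopFormat(viewer);
      MatView(A,viewer);                     /* prints the matrix entries */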

915: #if defined(PETSC_USE_DEBUG)
916: #include <../src/sys/totalview/tv_data_display.h>
917: PETSC_UNUSED static int TV_display_type(const struct _p_Mat *mat)
918: {
919:   TV_add_row("Local rows", "int", &mat->rmap->n);
920:   TV_add_row("Local columns", "int", &mat->cmap->n);
921:   TV_add_row("Global rows", "int", &mat->rmap->N);
922:   TV_add_row("Global columns", "int", &mat->cmap->N);
923:   TV_add_row("Typename", TV_ascii_string_type, ((PetscObject)mat)->type_name);
924:   return TV_format_OK;
925: }
926: #endif

930: /*@C
931:    MatLoad - Loads a matrix that has been stored in binary format
932:    with MatView().  The matrix format is determined from the options database.
933:    Generates a parallel MPI matrix if the communicator has more than one
934:    processor.  The default matrix type is AIJ.

936:    Collective on PetscViewer

938:    Input Parameters:
939: +  newmat - the newly loaded matrix; this needs to have been created with MatCreate()
940:             or some related function before a call to MatLoad()
941: -  viewer - binary file viewer, created with PetscViewerBinaryOpen()

943:    Options Database Keys:
944: .    -matload_block_size <bs> - used with block matrix formats (MATSEQBAIJ, ...) to specify the block size

948:    Level: beginner

950:    Notes:
951:    If the Mat type has not yet been given then MATAIJ is used, call MatSetFromOptions() on the
952:    Mat before calling this routine if you wish to set it from the options database.

954:    MatLoad() automatically loads into the options database any options
955:    given in the file filename.info where filename is the name of the file
956:    that was passed to the PetscViewerBinaryOpen(). The options in the info
957:    file will be ignored if you use the -viewer_binary_skip_info option.

959:    If the type or size of newmat is not set before a call to MatLoad, PETSc
960:    sets the default matrix type AIJ and sets the local and global sizes.
961:    If type and/or size is already set, then the same are used.

963:    In parallel, each processor can load a subset of rows (or the
964:    entire matrix).  This routine is especially useful when a large
965:    matrix is stored on disk and only part of it is desired on each
966:    processor.  For example, a parallel solver may access only some of
967:    the rows from each processor.  The algorithm used here reads
968:    relatively small blocks of data rather than reading the entire
969:    matrix and then subsetting it.

971:    Notes for advanced users:
972:    Most users should not need to know the details of the binary storage
973:    format, since MatLoad() and MatView() completely hide these details.
974:    But for anyone who's interested, the standard binary matrix storage
975:    format is

977: $    int    MAT_FILE_CLASSID
978: $    int    number of rows
979: $    int    number of columns
980: $    int    total number of nonzeros
981: $    int    *number nonzeros in each row
982: $    int    *column indices of all nonzeros (starting index is zero)
983: $    PetscScalar *values of all nonzeros

985:    PETSc automatically does the byte swapping for
986:    machines that store the bytes reversed, e.g. DEC alpha, freebsd,
987:    linux, Windows and the paragon; thus if you write your own binary
988:    read/write routines you have to swap the bytes; see PetscBinaryRead()
989:    and PetscBinaryWrite() to see how this may be done.

991: .keywords: matrix, load, binary, input

993: .seealso: PetscViewerBinaryOpen(), MatView(), VecLoad()

995:  @*/
996: PetscErrorCode MatLoad(Mat newmat,PetscViewer viewer)
997: {
999:   PetscBool      isbinary,flg;

1004:   PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
1005:   if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid viewer; open viewer with PetscViewerBinaryOpen()");

1007:   if (!((PetscObject)newmat)->type_name) {
1008:     MatSetType(newmat,MATAIJ);
1009:   }

1011:   if (!newmat->ops->load) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatLoad is not supported for type");
1012:   PetscLogEventBegin(MAT_Load,viewer,0,0,0);
1013:   (*newmat->ops->load)(newmat,viewer);
1014:   PetscLogEventEnd(MAT_Load,viewer,0,0,0);

1016:   flg  = PETSC_FALSE;
1017:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_symmetric",&flg,NULL);
1018:   if (flg) {
1019:     MatSetOption(newmat,MAT_SYMMETRIC,PETSC_TRUE);
1020:     MatSetOption(newmat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
1021:   }
1022:   flg  = PETSC_FALSE;
1023:   PetscOptionsGetBool(((PetscObject)newmat)->options,((PetscObject)newmat)->prefix,"-matload_spd",&flg,NULL);
1024:   if (flg) {
1025:     MatSetOption(newmat,MAT_SPD,PETSC_TRUE);
1026:   }
1027:   return(0);
1028: }
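
   A sketch of loading a matrix from a binary file; the file name "matrix.dat" is only an example (error checking omitted):

      Mat         A;
      PetscViewer fd;

      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&fd);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetFromOptions(A);                  /* optionally pick the type with -mat_type */
      MatLoad(A,fd);
      PetscViewerDestroy(&fd);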

1032: PetscErrorCode MatDestroy_Redundant(Mat_Redundant **redundant)
1033: {
1035:   Mat_Redundant  *redund = *redundant;
1036:   PetscInt       i;

1039:   if (redund){
1040:     if (redund->matseq) { /* via MatGetSubMatrices()  */
1041:       ISDestroy(&redund->isrow);
1042:       ISDestroy(&redund->iscol);
1043:       MatDestroy(&redund->matseq[0]);
1044:       PetscFree(redund->matseq);
1045:     } else {
1046:       PetscFree2(redund->send_rank,redund->recv_rank);
1047:       PetscFree(redund->sbuf_j);
1048:       PetscFree(redund->sbuf_a);
1049:       for (i=0; i<redund->nrecvs; i++) {
1050:         PetscFree(redund->rbuf_j[i]);
1051:         PetscFree(redund->rbuf_a[i]);
1052:       }
1053:       PetscFree4(redund->sbuf_nz,redund->rbuf_nz,redund->rbuf_j,redund->rbuf_a);
1054:     }

1056:     if (redund->subcomm) {
1057:       PetscCommDestroy(&redund->subcomm);
1058:     }
1059:     PetscFree(redund);
1060:   }
1061:   return(0);
1062: }

1066: /*@
1067:    MatDestroy - Frees space taken by a matrix.

1069:    Collective on Mat

1071:    Input Parameter:
1072: .  A - the matrix

1074:    Level: beginner

1076: @*/
1077: PetscErrorCode MatDestroy(Mat *A)
1078: {

1082:   if (!*A) return(0);
1084:   if (--((PetscObject)(*A))->refct > 0) {*A = NULL; return(0);}

1086:   /* if memory was published with SAWs then destroy it */
1087:   PetscObjectSAWsViewOff((PetscObject)*A);
1088:   if ((*A)->ops->destroy) {
1089:     (*(*A)->ops->destroy)(*A);
1090:   }

1092:   PetscFree((*A)->solvertype);
1093:   MatDestroy_Redundant(&(*A)->redundant);
1094:   MatNullSpaceDestroy(&(*A)->nullsp);
1095:   MatNullSpaceDestroy(&(*A)->transnullsp);
1096:   MatNullSpaceDestroy(&(*A)->nearnullsp);
1097:   PetscLayoutDestroy(&(*A)->rmap);
1098:   PetscLayoutDestroy(&(*A)->cmap);
1099:   PetscHeaderDestroy(A);
1100:   return(0);
1101: }

1105: /*@
1106:    MatSetValues - Inserts or adds a block of values into a matrix.
1107:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
1108:    MUST be called after all calls to MatSetValues() have been completed.

1110:    Not Collective

1112:    Input Parameters:
1113: +  mat - the matrix
1114: .  v - a logically two-dimensional array of values
1115: .  m, idxm - the number of rows and their global indices
1116: .  n, idxn - the number of columns and their global indices
1117: -  addv - either ADD_VALUES or INSERT_VALUES, where
1118:    ADD_VALUES adds values to any existing entries, and
1119:    INSERT_VALUES replaces existing entries with new values

1121:    Notes:
1122:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1123:       MatSetUp() before using this routine

1125:    By default the values, v, are row-oriented. See MatSetOption() for other options.

1127:    Calls to MatSetValues() with the INSERT_VALUES and ADD_VALUES
1128:    options cannot be mixed without intervening calls to the assembly
1129:    routines.

1131:    MatSetValues() uses 0-based row and column numbers in Fortran
1132:    as well as in C.

1134:    Negative indices may be passed in idxm and idxn; these rows and columns are
1135:    simply ignored. This allows easily inserting element stiffness matrices
1136:    with homogeneous Dirichlet boundary conditions that you don't want represented
1137:    in the matrix.

1139:    Efficiency Alert:
1140:    The routine MatSetValuesBlocked() may offer much better efficiency
1141:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1143:    Level: beginner

1145:    Concepts: matrices^putting entries in

1147: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1148:           InsertMode, INSERT_VALUES, ADD_VALUES
1149: @*/
1150: PetscErrorCode MatSetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1151: {
1153: #if defined(PETSC_USE_DEBUG)
1154:   PetscInt       i,j;
1155: #endif

1160:   if (!m || !n) return(0); /* no values to insert */
1164:   MatCheckPreallocated(mat,1);
1165:   if (mat->insertmode == NOT_SET_VALUES) {
1166:     mat->insertmode = addv;
1167:   }
1168: #if defined(PETSC_USE_DEBUG)
1169:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1170:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1171:   if (!mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);

1173:   for (i=0; i<m; i++) {
1174:     for (j=0; j<n; j++) {
1175:       if (mat->erroriffailure && PetscIsInfOrNanScalar(v[i*n+j]))
1176: #if defined(PETSC_USE_COMPLEX)
1177:         SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g+ig at matrix entry (%D,%D)",(double)PetscRealPart(v[i*n+j]),(double)PetscImaginaryPart(v[i*n+j]),idxm[i],idxn[j]);
1178: #else
1179:         SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_FP,"Inserting %g at matrix entry (%D,%D)",(double)v[i*n+j],idxm[i],idxn[j]);
1180: #endif
1181:     }
1182:   }
1183: #endif

1185:   if (mat->assembled) {
1186:     mat->was_assembled = PETSC_TRUE;
1187:     mat->assembled     = PETSC_FALSE;
1188:   }
1189:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1190:   (*mat->ops->setvalues)(mat,m,idxm,n,idxn,v,addv);
1191:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1192: #if defined(PETSC_HAVE_CUSP)
1193:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1194:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1195:   }
1196: #elif defined(PETSC_HAVE_VIENNACL)
1197:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1198:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1199:   }
1200: #elif defined(PETSC_HAVE_VECCUDA)
1201:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1202:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1203:   }
1204: #endif
1205:   return(0);
1206: }
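
   A sketch inserting one 2x2 element contribution into global rows/columns 0 and 1, assuming a preallocated matrix A (error checking omitted):

      PetscInt    idxm[2] = {0,1},idxn[2] = {0,1};
      PetscScalar v[4]    = {4.0,-1.0,-1.0,4.0};   /* row-oriented: v[i*n+j] */

      MatSetValues(A,2,idxm,2,idxn,v,ADD_VALUES);
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);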


1211: /*@
1212:    MatSetValuesRowLocal - Inserts a row (block row for BAIJ matrices) of nonzero
1213:         values into a matrix

1215:    Not Collective

1217:    Input Parameters:
1218: +  mat - the matrix
1219: .  row - the (block) row to set
1220: -  v - a logically two-dimensional array of values

1222:    Notes:
1223:    By default the values, v, are column-oriented (for the block version) and sorted

1225:    All the nonzeros in the row must be provided

1227:    The matrix must have previously had its column indices set

1229:    The row must belong to this process

1231:    Level: intermediate

1233:    Concepts: matrices^putting entries in

1235: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1236:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues(), MatSetValuesRow(), MatSetLocalToGlobalMapping()
1237: @*/
1238: PetscErrorCode MatSetValuesRowLocal(Mat mat,PetscInt row,const PetscScalar v[])
1239: {
1241:   PetscInt       globalrow;

1247:   ISLocalToGlobalMappingApply(mat->rmap->mapping,1,&row,&globalrow);
1248:   MatSetValuesRow(mat,globalrow,v);
1249: #if defined(PETSC_HAVE_CUSP)
1250:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1251:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1252:   }
1253: #elif defined(PETSC_HAVE_VIENNACL)
1254:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1255:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1256:   }
1257: #elif defined(PETSC_HAVE_VECCUDA)
1258:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1259:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1260:   }
1261: #endif
1262:   return(0);
1263: }

1267: /*@
1268:    MatSetValuesRow - Inserts a row (block row for BAIJ matrices) of nonzero
1269:         values into a matrix

1271:    Not Collective

1273:    Input Parameters:
1274: +  mat - the matrix
1275: .  row - the (block) row to set
1276: -  v - a logically two-dimensional array of values

1278:    Notes:
1279:    The values, v, are column-oriented for the block version.

1281:    All the nonzeros in the row must be provided

1283:    THE MATRIX MUST HAVE PREVIOUSLY HAD ITS COLUMN INDICES SET. IT IS RARE THAT THIS ROUTINE IS USED; usually MatSetValues() is used.

1285:    The row must belong to this process

1287:    Level: advanced

1289:    Concepts: matrices^putting entries in

1291: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1292:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1293: @*/
1294: PetscErrorCode MatSetValuesRow(Mat mat,PetscInt row,const PetscScalar v[])
1295: {

1301:   MatCheckPreallocated(mat,1);
1303: #if defined(PETSC_USE_DEBUG)
1304:   if (mat->insertmode == ADD_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add and insert values");
1305:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1306: #endif
1307:   mat->insertmode = INSERT_VALUES;

1309:   if (mat->assembled) {
1310:     mat->was_assembled = PETSC_TRUE;
1311:     mat->assembled     = PETSC_FALSE;
1312:   }
1313:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1314:   if (!mat->ops->setvaluesrow) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1315:   (*mat->ops->setvaluesrow)(mat,row,v);
1316:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1317: #if defined(PETSC_HAVE_CUSP)
1318:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1319:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1320:   }
1321: #elif defined(PETSC_HAVE_VIENNACL)
1322:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1323:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1324:   }
1325: #elif defined(PETSC_HAVE_VECCUDA)
1326:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1327:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1328:   }
1329: #endif
1330:   return(0);
1331: }

1335: /*@
1336:    MatSetValuesStencil - Inserts or adds a block of values into a matrix.
1337:      Using structured grid indexing

1339:    Not Collective

1341:    Input Parameters:
1342: +  mat - the matrix
1343: .  m - number of rows being entered
1344: .  idxm - grid coordinates (and component number when dof > 1) for matrix rows being entered
1345: .  n - number of columns being entered
1346: .  idxn - grid coordinates (and component number when dof > 1) for matrix columns being entered
1347: .  v - a logically two-dimensional array of values
1348: -  addv - either ADD_VALUES or INSERT_VALUES, where
1349:    ADD_VALUES adds values to any existing entries, and
1350:    INSERT_VALUES replaces existing entries with new values

1352:    Notes:
1353:    By default the values, v, are row-oriented.  See MatSetOption() for other options.

1355:    Calls to MatSetValuesStencil() with the INSERT_VALUES and ADD_VALUES
1356:    options cannot be mixed without intervening calls to the assembly
1357:    routines.

1359:    The grid coordinates are across the entire grid, not just the local portion

1361:    MatSetValuesStencil() uses 0-based row and column numbers in Fortran
1362:    as well as in C.

1364:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1366:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1367:    or call MatSetLocalToGlobalMapping() and MatSetStencil() first.

1369:    The columns and rows in the stencil passed in MUST be contained within the
1370:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1371:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1372:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1373:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1375:    In Fortran idxm and idxn should be declared as
1376: $     MatStencil idxm(4,m),idxn(4,n)
1377:    and the values inserted using
1378: $    idxm(MatStencil_i,1) = i
1379: $    idxm(MatStencil_j,1) = j
1380: $    idxm(MatStencil_k,1) = k
1381: $    idxm(MatStencil_c,1) = c
1382:    etc

1384:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
1385:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
1386:    etc., to obtain values wrapped from the left edge. This only works with the
1387:    DM_BOUNDARY_PERIODIC boundary type.

1389:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
1390:    a single value per point) you can skip filling those indices.

1392:    Inspired by the structured grid interface to the HYPRE package
1393:    (http://www.llnl.gov/CASC/hypre)

1395:    Efficiency Alert:
1396:    The routine MatSetValuesBlockedStencil() may offer much better efficiency
1397:    for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).

1399:    Level: beginner

1401:    Concepts: matrices^putting entries in

1403: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1404:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil
1405: @*/
1406: PetscErrorCode MatSetValuesStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1407: {
1409:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1410:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1411:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1414:   if (!m || !n) return(0); /* no values to insert */

1421:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1422:     jdxm = buf; jdxn = buf+m;
1423:   } else {
1424:     PetscMalloc2(m,&bufm,n,&bufn);
1425:     jdxm = bufm; jdxn = bufn;
1426:   }
1427:   for (i=0; i<m; i++) {
1428:     for (j=0; j<3-sdim; j++) dxm++;
1429:     tmp = *dxm++ - starts[0];
1430:     for (j=0; j<dim-1; j++) {
1431:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1432:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1433:     }
1434:     if (mat->stencil.noc) dxm++;
1435:     jdxm[i] = tmp;
1436:   }
1437:   for (i=0; i<n; i++) {
1438:     for (j=0; j<3-sdim; j++) dxn++;
1439:     tmp = *dxn++ - starts[0];
1440:     for (j=0; j<dim-1; j++) {
1441:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1442:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1443:     }
1444:     if (mat->stencil.noc) dxn++;
1445:     jdxn[i] = tmp;
1446:   }
1447:   MatSetValuesLocal(mat,m,jdxm,n,jdxn,v,addv);
1448:   PetscFree2(bufm,bufn);
1449:   return(0);
1450: }
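
   A sketch of one row of a 2d 5-point Laplacian, assuming A was obtained from DMCreateMatrix() on a DMDA
   and (i,j) is an interior grid point with one degree of freedom per node (error checking omitted):

      MatStencil  row,col[5];
      PetscScalar v[5];

      row.i = i;      row.j = j;
      col[0].i = i;   col[0].j = j;   v[0] =  4.0;
      col[1].i = i-1; col[1].j = j;   v[1] = -1.0;
      col[2].i = i+1; col[2].j = j;   v[2] = -1.0;
      col[3].i = i;   col[3].j = j-1; v[3] = -1.0;
      col[4].i = i;   col[4].j = j+1; v[4] = -1.0;
      MatSetValuesStencil(A,1,&row,5,col,v,INSERT_VALUES);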

1454: /*@
1455:    MatSetValuesBlockedStencil - Inserts or adds a block of values into a matrix.
1456:      Using structured grid indexing

1458:    Not Collective

1460:    Input Parameters:
1461: +  mat - the matrix
1462: .  m - number of rows being entered
1463: .  idxm - grid coordinates for matrix rows being entered
1464: .  n - number of columns being entered
1465: .  idxn - grid coordinates for matrix columns being entered
1466: .  v - a logically two-dimensional array of values
1467: -  addv - either ADD_VALUES or INSERT_VALUES, where
1468:    ADD_VALUES adds values to any existing entries, and
1469:    INSERT_VALUES replaces existing entries with new values

1471:    Notes:
1472:    By default the values, v, are row-oriented and unsorted.
1473:    See MatSetOption() for other options.

1475:    Calls to MatSetValuesBlockedStencil() with the INSERT_VALUES and ADD_VALUES
1476:    options cannot be mixed without intervening calls to the assembly
1477:    routines.

1479:    The grid coordinates are across the entire grid, not just the local portion

1481:    MatSetValuesBlockedStencil() uses 0-based row and column numbers in Fortran
1482:    as well as in C.

1484:    For setting/accessing vector values via array coordinates you can use the DMDAVecGetArray() routine

1486:    In order to use this routine you must either obtain the matrix with DMCreateMatrix()
1487:    or call MatSetBlockSize(), MatSetLocalToGlobalMapping() and MatSetStencil() first.

1489:    The columns and rows in the stencil passed in MUST be contained within the
1490:    ghost region of the given process as set with DMDACreateXXX() or MatSetStencil(). For example,
1491:    if you create a DMDA with an overlap of one grid level and on a particular process its first
1492:    local nonghost x logical coordinate is 6 (so its first ghost x logical coordinate is 5) the
1493:    first i index you can use in your column and row indices in MatSetStencil() is 5.

1495:    In Fortran idxm and idxn should be declared as
1496: $     MatStencil idxm(4,m),idxn(4,n)
1497:    and the values inserted using
1498: $    idxm(MatStencil_i,1) = i
1499: $    idxm(MatStencil_j,1) = j
1500: $    idxm(MatStencil_k,1) = k
1501:    etc

1503:    Negative indices may be passed in idxm and idxn; these rows and columns are
1504:    simply ignored. This allows easily inserting element stiffness matrices
1505:    with homogeneous Dirichlet boundary conditions that you don't want represented
1506:    in the matrix.

1508:    Inspired by the structured grid interface to the HYPRE package
1509:    (http://www.llnl.gov/CASC/hypre)

1511:    Level: beginner

1513:    Concepts: matrices^putting entries in

1515: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1516:           MatSetValues(), MatSetValuesStencil(), MatSetStencil(), DMCreateMatrix(), DMDAVecGetArray(), MatStencil,
1517:           MatSetBlockSize(), MatSetLocalToGlobalMapping()
1518: @*/
1519: PetscErrorCode MatSetValuesBlockedStencil(Mat mat,PetscInt m,const MatStencil idxm[],PetscInt n,const MatStencil idxn[],const PetscScalar v[],InsertMode addv)
1520: {
1522:   PetscInt       buf[8192],*bufm=0,*bufn=0,*jdxm,*jdxn;
1523:   PetscInt       j,i,dim = mat->stencil.dim,*dims = mat->stencil.dims+1,tmp;
1524:   PetscInt       *starts = mat->stencil.starts,*dxm = (PetscInt*)idxm,*dxn = (PetscInt*)idxn,sdim = dim - (1 - (PetscInt)mat->stencil.noc);

1527:   if (!m || !n) return(0); /* no values to insert */

1534:   if ((m+n) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1535:     jdxm = buf; jdxn = buf+m;
1536:   } else {
1537:     PetscMalloc2(m,&bufm,n,&bufn);
1538:     jdxm = bufm; jdxn = bufn;
1539:   }
1540:   for (i=0; i<m; i++) {
1541:     for (j=0; j<3-sdim; j++) dxm++;
1542:     tmp = *dxm++ - starts[0];
1543:     for (j=0; j<sdim-1; j++) {
1544:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1545:       else                                       tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
1546:     }
1547:     dxm++;
1548:     jdxm[i] = tmp;
1549:   }
1550:   for (i=0; i<n; i++) {
1551:     for (j=0; j<3-sdim; j++) dxn++;
1552:     tmp = *dxn++ - starts[0];
1553:     for (j=0; j<sdim-1; j++) {
1554:       if ((*dxn++ - starts[j+1]) < 0 || tmp < 0) tmp = -1;
1555:       else                                       tmp = tmp*dims[j] + *(dxn-1) - starts[j+1];
1556:     }
1557:     dxn++;
1558:     jdxn[i] = tmp;
1559:   }
1560:   MatSetValuesBlockedLocal(mat,m,jdxm,n,jdxn,v,addv);
1561:   PetscFree2(bufm,bufn);
1562: #if defined(PETSC_HAVE_CUSP)
1563:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1564:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1565:   }
1566: #elif defined(PETSC_HAVE_VIENNACL)
1567:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1568:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1569:   }
1570: #elif defined(PETSC_HAVE_VECCUDA)
1571:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1572:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1573:   }
1574: #endif
1575:   return(0);
1576: }

1580: /*@
1581:    MatSetStencil - Sets the grid information for setting values into a matrix via
1582:         MatSetValuesStencil()

1584:    Not Collective

1586:    Input Parameters:
1587: +  mat - the matrix
1588: .  dim - dimension of the grid 1, 2, or 3
1589: .  dims - number of grid points in x, y, and z direction, including ghost points on your processor
1590: .  starts - starting point of ghost nodes on your processor in x, y, and z direction
1591: -  dof - number of degrees of freedom per node


1594:    Inspired by the structured grid interface to the HYPRE package
1595:    (http://www.llnl.gov/CASC/hypre)

1597:    For matrices generated with DMCreateMatrix() this routine is automatically called and so not needed by the
1598:    user.

1600:    Level: beginner

1602:    Concepts: matrices^putting entries in

1604: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal()
1605:           MatSetValues(), MatSetValuesBlockedStencil(), MatSetValuesStencil()
1606: @*/
1607: PetscErrorCode MatSetStencil(Mat mat,PetscInt dim,const PetscInt dims[],const PetscInt starts[],PetscInt dof)
1608: {
1609:   PetscInt i;


1616:   mat->stencil.dim = dim + (dof > 1);
1617:   for (i=0; i<dim; i++) {
1618:     mat->stencil.dims[i]   = dims[dim-i-1];      /* copy the values in backwards */
1619:     mat->stencil.starts[i] = starts[dim-i-1];
1620:   }
1621:   mat->stencil.dims[dim]   = dof;
1622:   mat->stencil.starts[dim] = 0;
1623:   mat->stencil.noc         = (PetscBool)(dof == 1);
1624:   return(0);
1625: }

1629: /*@
1630:    MatSetValuesBlocked - Inserts or adds a block of values into a matrix.

1632:    Not Collective

1634:    Input Parameters:
1635: +  mat - the matrix
1636: .  v - a logically two-dimensional array of values
1637: .  m, idxm - the number of block rows and their global block indices
1638: .  n, idxn - the number of block columns and their global block indices
1639: -  addv - either ADD_VALUES or INSERT_VALUES, where
1640:    ADD_VALUES adds values to any existing entries, and
1641:    INSERT_VALUES replaces existing entries with new values

1643:    Notes:
1644:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call
1645:    MatXXXXSetPreallocation() or MatSetUp() before using this routine.

1647:    The m and n count the NUMBER of blocks in the row direction and column direction,
1648:    NOT the total number of rows/columns; for example, if the block size is 2 and
1649:    you are passing in values for rows 2,3,4,5  then m would be 2 (not 4).
1650:    The values in idxm would be 1 and 2; that is, the first row index of each block divided by
1651:    the block size.

1653:    Note that you must call MatSetBlockSize() when constructing this matrix (before
1654:    preallocating it).

1656:    By default the values, v, are row-oriented, so the layout of
1657:    v is the same as for MatSetValues(). See MatSetOption() for other options.

1659:    Calls to MatSetValuesBlocked() with the INSERT_VALUES and ADD_VALUES
1660:    options cannot be mixed without intervening calls to the assembly
1661:    routines.

1663:    MatSetValuesBlocked() uses 0-based row and column numbers in Fortran
1664:    as well as in C.

1666:    Negative indices may be passed in idxm and idxn, these rows and columns are
1667:    simply ignored. This allows easily inserting element stiffness matrices
1668:    with homogeneous Dirichlet boundary conditions that you don't want represented
1669:    in the matrix.

1671:    Each time an entry is set within a sparse matrix via MatSetValues(),
1672:    internal searching must be done to determine where to place the
1673:    data in the matrix storage space.  By instead inserting blocks of
1674:    entries via MatSetValuesBlocked(), the overhead of matrix assembly is
1675:    reduced.

1677:    Example:
1678: $   Suppose m=n=2 and block size (bs) = 2. The array is
1679: $
1680: $   1  2  | 3  4
1681: $   5  6  | 7  8
1682: $   - - - | - - -
1683: $   9  10 | 11 12
1684: $   13 14 | 15 16
1685: $
1686: $   v[] should be passed in like
1687: $   v[] = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
1688: $
1689: $  If you are not using row oriented storage of v (that is you called MatSetOption(mat,MAT_ROW_ORIENTED,PETSC_FALSE)) then
1690: $   v[] = [1,5,9,13,2,6,10,14,3,7,11,15,4,8,12,16]
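
   A call sketch for the example above (a minimal sketch; the matrix mat is assumed to be preallocated with block size 2, and the block indices follow the rows 2,3,4,5 case discussed above):
.vb
      PetscInt    idxm[2] = {1,2},idxn[2] = {1,2};   /* block row and column indices */
      PetscScalar v[16]   = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
      MatSetValuesBlocked(mat,2,idxm,2,idxn,v,INSERT_VALUES);
.ve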

1692:    Level: intermediate

1694:    Concepts: matrices^putting entries in blocked

1696: .seealso: MatSetBlockSize(), MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesBlockedLocal()
1697: @*/
1698: PetscErrorCode MatSetValuesBlocked(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
1699: {

1705:   if (!m || !n) return(0); /* no values to insert */
1709:   MatCheckPreallocated(mat,1);
1710:   if (mat->insertmode == NOT_SET_VALUES) {
1711:     mat->insertmode = addv;
1712:   }
1713: #if defined(PETSC_USE_DEBUG)
1714:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
1715:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1716:   if (!mat->ops->setvaluesblocked && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1717: #endif

1719:   if (mat->assembled) {
1720:     mat->was_assembled = PETSC_TRUE;
1721:     mat->assembled     = PETSC_FALSE;
1722:   }
1723:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
1724:   if (mat->ops->setvaluesblocked) {
1725:     (*mat->ops->setvaluesblocked)(mat,m,idxm,n,idxn,v,addv);
1726:   } else {
1727:     PetscInt buf[8192],*bufr=0,*bufc=0,*iidxm,*iidxn;
1728:     PetscInt i,j,bs,cbs;
1729:     MatGetBlockSizes(mat,&bs,&cbs);
1730:     if (m*bs+n*cbs <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
1731:       iidxm = buf; iidxn = buf + m*bs;
1732:     } else {
1733:       PetscMalloc2(m*bs,&bufr,n*cbs,&bufc);
1734:       iidxm = bufr; iidxn = bufc;
1735:     }
1736:     for (i=0; i<m; i++) {
1737:       for (j=0; j<bs; j++) {
1738:         iidxm[i*bs+j] = bs*idxm[i] + j;
1739:       }
1740:     }
1741:     for (i=0; i<n; i++) {
1742:       for (j=0; j<cbs; j++) {
1743:         iidxn[i*cbs+j] = cbs*idxn[i] + j;
1744:       }
1745:     }
1746:     MatSetValues(mat,m*bs,iidxm,n*cbs,iidxn,v,addv);
1747:     PetscFree2(bufr,bufc);
1748:   }
1749:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
1750: #if defined(PETSC_HAVE_CUSP)
1751:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
1752:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
1753:   }
1754: #elif defined(PETSC_HAVE_VIENNACL)
1755:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
1756:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
1757:   }
1758: #elif defined(PETSC_HAVE_VECCUDA)
1759:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
1760:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
1761:   }
1762: #endif
1763:   return(0);
1764: }

1768: /*@
1769:    MatGetValues - Gets a block of values from a matrix.

1771:    Not Collective; currently only returns a local block

1773:    Input Parameters:
1774: +  mat - the matrix
1775: .  v - a logically two-dimensional array for storing the values
1776: .  m, idxm - the number of rows and their global indices
1777: -  n, idxn - the number of columns and their global indices

1779:    Notes:
1780:    The user must allocate space (m*n PetscScalars) for the values, v.
1781:    The values, v, are then returned in a row-oriented format,
1782:    analogous to that used by default in MatSetValues().

1784:    MatGetValues() uses 0-based row and column numbers in
1785:    Fortran as well as in C.

1787:    MatGetValues() requires that the matrix has been assembled
1788:    with MatAssemblyBegin()/MatAssemblyEnd().  Thus, calls to
1789:    MatSetValues() and MatGetValues() CANNOT be made in succession
1790:    without intermediate matrix assembly.

1792:    Negative row or column indices will be ignored and those locations in v[] will be
1793:    left unchanged.
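
   Example (a minimal sketch; mat is assumed to be assembled, and the particular indices are illustrative):
.vb
      PetscInt    idxm[2] = {0,1},idxn[3] = {0,1,2};
      PetscScalar v[6];                     /* m*n = 2*3 entries, returned row-oriented */
      MatGetValues(mat,2,idxm,3,idxn,v);    /* v[i*3+j] = mat(idxm[i],idxn[j]) */
.ve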

1795:    Level: advanced

1797:    Concepts: matrices^accessing values

1799: .seealso: MatGetRow(), MatGetSubMatrices(), MatSetValues()
1800: @*/
1801: PetscErrorCode MatGetValues(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
1802: {

1808:   if (!m || !n) return(0);
1812:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
1813:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1814:   if (!mat->ops->getvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
1815:   MatCheckPreallocated(mat,1);

1817:   PetscLogEventBegin(MAT_GetValues,mat,0,0,0);
1818:   (*mat->ops->getvalues)(mat,m,idxm,n,idxn,v);
1819:   PetscLogEventEnd(MAT_GetValues,mat,0,0,0);
1820:   return(0);
1821: }

1825: /*@
1826:   MatSetValuesBatch - Adds (ADD_VALUES) many blocks of values into a matrix at once. The blocks must all be square and
1827:   the same size. Currently, this can only be called once and creates the given matrix.

1829:   Not Collective

1831:   Input Parameters:
1832: + mat - the matrix
1833: . nb - the number of blocks
1834: . bs - the number of rows (and columns) in each block
1835: . rows - a concatenation of the rows for each block
1836: - v - a concatenation of logically two-dimensional arrays of values

1838:   Notes:
1839:   In the future, we will extend this routine to handle rectangular blocks, and to allow multiple calls for a given matrix.
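
  Example (a minimal data-layout sketch with two hypothetical 2x2 blocks; rows[] concatenates the row indices of each block and v[] concatenates the row-oriented blocks):
.vb
     PetscInt    rows[4] = {0,1,4,5};           /* block 0 touches rows 0,1; block 1 touches rows 4,5 */
     PetscScalar v[8]    = {1,2,3,4,5,6,7,8};   /* nb*bs*bs values, one bs*bs block after another */
     MatSetValuesBatch(mat,2,2,rows,v);         /* nb = 2 blocks of size bs = 2 */
.ve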

1841:   Level: advanced

1843:   Concepts: matrices^putting entries in

1845: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
1846:           InsertMode, INSERT_VALUES, ADD_VALUES, MatSetValues()
1847: @*/
1848: PetscErrorCode MatSetValuesBatch(Mat mat, PetscInt nb, PetscInt bs, PetscInt rows[], const PetscScalar v[])
1849: {

1857: #if defined(PETSC_USE_DEBUG)
1858:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
1859: #endif

1861:   PetscLogEventBegin(MAT_SetValuesBatch,mat,0,0,0);
1862:   if (mat->ops->setvaluesbatch) {
1863:     (*mat->ops->setvaluesbatch)(mat,nb,bs,rows,v);
1864:   } else {
1865:     PetscInt b;
1866:     for (b = 0; b < nb; ++b) {
1867:       MatSetValues(mat, bs, &rows[b*bs], bs, &rows[b*bs], &v[b*bs*bs], ADD_VALUES);
1868:     }
1869:   }
1870:   PetscLogEventEnd(MAT_SetValuesBatch,mat,0,0,0);
1871:   return(0);
1872: }

1876: /*@
1877:    MatSetLocalToGlobalMapping - Sets a local-to-global numbering for use by
1878:    the routine MatSetValuesLocal() to allow users to insert matrix entries
1879:    using a local (per-processor) numbering.

1881:    Not Collective

1883:    Input Parameters:
1884: +  x - the matrix
1885: .  rmapping - row mapping created with ISLocalToGlobalMappingCreate()   or ISLocalToGlobalMappingCreateIS()
1886: - cmapping - column mapping
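
   Example (a minimal sketch; the global indices of the four local nodes are hypothetical, and the same mapping is reused for rows and columns):
.vb
      ISLocalToGlobalMapping map;
      PetscInt               globals[4] = {10,11,12,13};   /* global index of each local node */
      ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,1,4,globals,PETSC_COPY_VALUES,&map);
      MatSetLocalToGlobalMapping(x,map,map);
      ISLocalToGlobalMappingDestroy(&map);
.ve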

1888:    Level: intermediate

1890:    Concepts: matrices^local to global mapping
1891:    Concepts: local to global mapping^for matrices

1893: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetValuesLocal()
1894: @*/
1895: PetscErrorCode MatSetLocalToGlobalMapping(Mat x,ISLocalToGlobalMapping rmapping,ISLocalToGlobalMapping cmapping)
1896: {


1905:   if (x->ops->setlocaltoglobalmapping) {
1906:     (*x->ops->setlocaltoglobalmapping)(x,rmapping,cmapping);
1907:   } else {
1908:     PetscLayoutSetISLocalToGlobalMapping(x->rmap,rmapping);
1909:     PetscLayoutSetISLocalToGlobalMapping(x->cmap,cmapping);
1910:   }
1911:   return(0);
1912: }


1917: /*@
1918:    MatGetLocalToGlobalMapping - Gets the local-to-global numbering set by MatSetLocalToGlobalMapping()

1920:    Not Collective

1922:    Input Parameters:
1923: .  A - the matrix

1925:    Output Parameters:
1926: + rmapping - row mapping
1927: - cmapping - column mapping

1929:    Level: advanced

1931:    Concepts: matrices^local to global mapping
1932:    Concepts: local to global mapping^for matrices

1934: .seealso:  MatSetValuesLocal()
1935: @*/
1936: PetscErrorCode MatGetLocalToGlobalMapping(Mat A,ISLocalToGlobalMapping *rmapping,ISLocalToGlobalMapping *cmapping)
1937: {
1943:   if (rmapping) *rmapping = A->rmap->mapping;
1944:   if (cmapping) *cmapping = A->cmap->mapping;
1945:   return(0);
1946: }

1950: /*@
1951:    MatGetLayouts - Gets the PetscLayout objects for rows and columns

1953:    Not Collective

1955:    Input Parameters:
1956: .  A - the matrix

1958:    Output Parameters:
1959: + rmap - row layout
1960: - cmap - column layout

1962:    Level: advanced

1964: .seealso:  MatCreateVecs(), MatGetLocalToGlobalMapping()
1965: @*/
1966: PetscErrorCode MatGetLayouts(Mat A,PetscLayout *rmap,PetscLayout *cmap)
1967: {
1973:   if (rmap) *rmap = A->rmap;
1974:   if (cmap) *cmap = A->cmap;
1975:   return(0);
1976: }

1980: /*@
1981:    MatSetValuesLocal - Inserts or adds values into certain locations of a matrix,
1982:    using a local ordering of the nodes.

1984:    Not Collective

1986:    Input Parameters:
1987: +  mat - the matrix
1988: .  nrow, irow - number of rows and their local indices
1989: .  ncol, icol - number of columns and their local indices
1990: .  y -  a logically two-dimensional array of values
1991: -  addv - either INSERT_VALUES or ADD_VALUES, where
1992:    ADD_VALUES adds values to any existing entries, and
1993:    INSERT_VALUES replaces existing entries with new values

1995:    Notes:
1996:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
1997:       MatSetUp() before using this routine

1999:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetLocalToGlobalMapping() before using this routine

2001:    Calls to MatSetValuesLocal() with the INSERT_VALUES and ADD_VALUES
2002:    options cannot be mixed without intervening calls to the assembly
2003:    routines.

2005:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2006:    MUST be called after all calls to MatSetValuesLocal() have been completed.
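
   Example (a minimal sketch; assumes MatSetLocalToGlobalMapping() has already been called and the local indices and values are illustrative):
.vb
      PetscInt    irow[1] = {0},icol[2] = {0,1};   /* local indices */
      PetscScalar y[2]    = {2.0,-1.0};
      MatSetValuesLocal(mat,1,irow,2,icol,y,ADD_VALUES);
      MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve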

2008:    Level: intermediate

2010:    Concepts: matrices^putting entries in with local numbering

2012: .seealso:  MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), MatSetLocalToGlobalMapping(),
2013:            MatSetValueLocal()
2014: @*/
2015: PetscErrorCode MatSetValuesLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2016: {

2022:   MatCheckPreallocated(mat,1);
2023:   if (!nrow || !ncol) return(0); /* no values to insert */
2027:   if (mat->insertmode == NOT_SET_VALUES) {
2028:     mat->insertmode = addv;
2029:   }
2030: #if defined(PETSC_USE_DEBUG)
2031:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2032:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2033:   if (!mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2034: #endif

2036:   if (mat->assembled) {
2037:     mat->was_assembled = PETSC_TRUE;
2038:     mat->assembled     = PETSC_FALSE;
2039:   }
2040:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2041:   if (mat->ops->setvalueslocal) {
2042:     (*mat->ops->setvalueslocal)(mat,nrow,irow,ncol,icol,y,addv);
2043:   } else {
2044:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2045:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2046:       irowm = buf; icolm = buf+nrow;
2047:     } else {
2048:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2049:       irowm = bufr; icolm = bufc;
2050:     }
2051:     ISLocalToGlobalMappingApply(mat->rmap->mapping,nrow,irow,irowm);
2052:     ISLocalToGlobalMappingApply(mat->cmap->mapping,ncol,icol,icolm);
2053:     MatSetValues(mat,nrow,irowm,ncol,icolm,y,addv);
2054:     PetscFree2(bufr,bufc);
2055:   }
2056:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2057: #if defined(PETSC_HAVE_CUSP)
2058:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2059:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2060:   }
2061: #elif defined(PETSC_HAVE_VIENNACL)
2062:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2063:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2064:   }
2065: #elif defined(PETSC_HAVE_VECCUDA)
2066:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2067:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2068:   }
2069: #endif
2070:   return(0);
2071: }

2075: /*@
2076:    MatSetValuesBlockedLocal - Inserts or adds values into certain locations of a matrix,
2077:    using a local ordering of the nodes a block at a time.

2079:    Not Collective

2081:    Input Parameters:
2082: +  mat - the matrix
2083: .  nrow, irow - number of rows and their local indices
2084: .  ncol, icol - number of columns and their local indices
2085: .  y -  a logically two-dimensional array of values
2086: -  addv - either INSERT_VALUES or ADD_VALUES, where
2087:    ADD_VALUES adds values to any existing entries, and
2088:    INSERT_VALUES replaces existing entries with new values

2090:    Notes:
2091:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatXXXXSetPreallocation() or
2092:       MatSetUp() before using this routine

2094:    If you create the matrix yourself (that is not with a call to DMCreateMatrix()) then you MUST call MatSetBlockSize() and MatSetLocalToGlobalMapping()
2095:       before using this routine.

2097:    Calls to MatSetValuesBlockedLocal() with the INSERT_VALUES and ADD_VALUES
2098:    options cannot be mixed without intervening calls to the assembly
2099:    routines.

2101:    These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
2102:    MUST be called after all calls to MatSetValuesBlockedLocal() have been completed.
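
   Example (a minimal sketch; assumes the block size and local-to-global mapping have been set, and the local block indices and values are illustrative):
.vb
      PetscInt    irow[1] = {0},icol[1] = {1};   /* local block indices */
      PetscScalar y[4]    = {1,2,3,4};           /* one bs*bs = 2*2 block, row-oriented */
      MatSetValuesBlockedLocal(mat,1,irow,1,icol,y,ADD_VALUES);
.ve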

2104:    Level: intermediate

2106:    Concepts: matrices^putting blocked values in with local numbering

2108: .seealso:  MatSetBlockSize(), MatSetLocalToGlobalMapping(), MatAssemblyBegin(), MatAssemblyEnd(),
2109:            MatSetValuesLocal(),  MatSetValuesBlocked()
2110: @*/
2111: PetscErrorCode MatSetValuesBlockedLocal(Mat mat,PetscInt nrow,const PetscInt irow[],PetscInt ncol,const PetscInt icol[],const PetscScalar y[],InsertMode addv)
2112: {

2118:   MatCheckPreallocated(mat,1);
2119:   if (!nrow || !ncol) return(0); /* no values to insert */
2123:   if (mat->insertmode == NOT_SET_VALUES) {
2124:     mat->insertmode = addv;
2125:   }
2126: #if defined(PETSC_USE_DEBUG)
2127:   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
2128:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2129:   if (!mat->ops->setvaluesblockedlocal && !mat->ops->setvaluesblocked && !mat->ops->setvalueslocal && !mat->ops->setvalues) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2130: #endif

2132:   if (mat->assembled) {
2133:     mat->was_assembled = PETSC_TRUE;
2134:     mat->assembled     = PETSC_FALSE;
2135:   }
2136:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
2137:   if (mat->ops->setvaluesblockedlocal) {
2138:     (*mat->ops->setvaluesblockedlocal)(mat,nrow,irow,ncol,icol,y,addv);
2139:   } else {
2140:     PetscInt buf[8192],*bufr=0,*bufc=0,*irowm,*icolm;
2141:     if ((nrow+ncol) <= (PetscInt)(sizeof(buf)/sizeof(PetscInt))) {
2142:       irowm = buf; icolm = buf + nrow;
2143:     } else {
2144:       PetscMalloc2(nrow,&bufr,ncol,&bufc);
2145:       irowm = bufr; icolm = bufc;
2146:     }
2147:     ISLocalToGlobalMappingApplyBlock(mat->rmap->mapping,nrow,irow,irowm);
2148:     ISLocalToGlobalMappingApplyBlock(mat->cmap->mapping,ncol,icol,icolm);
2149:     MatSetValuesBlocked(mat,nrow,irowm,ncol,icolm,y,addv);
2150:     PetscFree2(bufr,bufc);
2151:   }
2152:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
2153: #if defined(PETSC_HAVE_CUSP)
2154:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
2155:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
2156:   }
2157: #elif defined(PETSC_HAVE_VIENNACL)
2158:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
2159:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
2160:   }
2161: #elif defined(PETSC_HAVE_VECCUDA)
2162:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
2163:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
2164:   }
2165: #endif
2166:   return(0);
2167: }

2171: /*@
2172:    MatMultDiagonalBlock - Computes the matrix-vector product, y = Dx, where D is defined by the inode or block structure of the diagonal

2174:    Collective on Mat and Vec

2176:    Input Parameters:
2177: +  mat - the matrix
2178: -  x   - the vector to be multiplied

2180:    Output Parameters:
2181: .  y - the result

2183:    Notes:
2184:    The vectors x and y cannot be the same.  I.e., one cannot
2185:    call MatMult(A,y,y).

2187:    Level: developer

2189:    Concepts: matrix-vector product

2191: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2192: @*/
2193: PetscErrorCode MatMultDiagonalBlock(Mat mat,Vec x,Vec y)
2194: {


2203:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2204:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2205:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2206:   MatCheckPreallocated(mat,1);

2208:   if (!mat->ops->multdiagonalblock) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2209:   (*mat->ops->multdiagonalblock)(mat,x,y);
2210:   PetscObjectStateIncrease((PetscObject)y);
2211:   return(0);
2212: }

2214: /* --------------------------------------------------------*/
2217: /*@
2218:    MatMult - Computes the matrix-vector product, y = Ax.

2220:    Neighbor-wise Collective on Mat and Vec

2222:    Input Parameters:
2223: +  mat - the matrix
2224: -  x   - the vector to be multiplied

2226:    Output Parameters:
2227: .  y - the result

2229:    Notes:
2230:    The vectors x and y cannot be the same.  I.e., one cannot
2231:    call MatMult(A,y,y).
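
   Example (a minimal sketch; mat is assumed to be assembled, and the vectors are created to match its layout):
.vb
      Vec x,y;
      MatCreateVecs(mat,&x,&y);   /* x is compatible with the columns, y with the rows */
      VecSet(x,1.0);
      MatMult(mat,x,y);           /* y = mat*x */
.ve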

2233:    Level: beginner

2235:    Concepts: matrix-vector product

2237: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2238: @*/
2239: PetscErrorCode MatMult(Mat mat,Vec x,Vec y)
2240: {

2248:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2249:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2250:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2251: #if !defined(PETSC_HAVE_CONSTRAINTS)
2252:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2253:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2254:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);
2255: #endif
2256:   VecLocked(y,3);
2257:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2258:   MatCheckPreallocated(mat,1);

2260:   VecLockPush(x);
2261:   if (!mat->ops->mult) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply defined");
2262:   PetscLogEventBegin(MAT_Mult,mat,x,y,0);
2263:   (*mat->ops->mult)(mat,x,y);
2264:   PetscLogEventEnd(MAT_Mult,mat,x,y,0);
2265:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2266:   VecLockPop(x);
2267:   return(0);
2268: }

2272: /*@
2273:    MatMultTranspose - Computes matrix transpose times a vector.

2275:    Neighbor-wise Collective on Mat and Vec

2277:    Input Parameters:
2278: +  mat - the matrix
2279: -  x   - the vector to be multiplied

2281:    Output Parameters:
2282: .  y - the result

2284:    Notes:
2285:    The vectors x and y cannot be the same.  I.e., one cannot
2286:    call MatMultTranspose(A,y,y).

2288:    For complex numbers this does NOT compute the Hermitian (complex conjugate) transpose multiply;
2289:    use MatMultHermitianTranspose()

2291:    Level: beginner

2293:    Concepts: matrix vector product^transpose

2295: .seealso: MatMult(), MatMultAdd(), MatMultTransposeAdd(), MatMultHermitianTranspose(), MatTranspose()
2296: @*/
2297: PetscErrorCode MatMultTranspose(Mat mat,Vec x,Vec y)
2298: {


2307:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2308:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2309:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2310: #if !defined(PETSC_HAVE_CONSTRAINTS)
2311:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2312:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2313: #endif
2314:   if (mat->erroriffailure) {VecValidValues(x,2,PETSC_TRUE);}
2315:   MatCheckPreallocated(mat,1);

2317:   if (!mat->ops->multtranspose) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a multiply transpose defined");
2318:   PetscLogEventBegin(MAT_MultTranspose,mat,x,y,0);
2319:   VecLockPush(x);
2320:   (*mat->ops->multtranspose)(mat,x,y);
2321:   VecLockPop(x);
2322:   PetscLogEventEnd(MAT_MultTranspose,mat,x,y,0);
2323:   PetscObjectStateIncrease((PetscObject)y);
2324:   if (mat->erroriffailure) {VecValidValues(y,3,PETSC_FALSE);}
2325:   return(0);
2326: }

2330: /*@
2331:    MatMultHermitianTranspose - Computes matrix Hermitian transpose times a vector.

2333:    Neighbor-wise Collective on Mat and Vec

2335:    Input Parameters:
2336: +  mat - the matrix
2337: -  x   - the vector to be multiplied

2339:    Output Parameters:
2340: .  y - the result

2342:    Notes:
2343:    The vectors x and y cannot be the same.  I.e., one cannot
2344:    call MatMultHermitianTranspose(A,y,y).

2346:    Also called the conjugate transpose, complex conjugate transpose, or adjoint.

2348:    For real numbers MatMultTranspose() and MatMultHermitianTranspose() are identical.

2350:    Level: beginner

2352:    Concepts: matrix vector product^transpose

2354: .seealso: MatMult(), MatMultAdd(), MatMultHermitianTransposeAdd(), MatMultTranspose()
2355: @*/
2356: PetscErrorCode MatMultHermitianTranspose(Mat mat,Vec x,Vec y)
2357: {
2359:   Vec            w;


2367:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2368:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2369:   if (x == y) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2370: #if !defined(PETSC_HAVE_CONSTRAINTS)
2371:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2372:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
2373: #endif
2374:   MatCheckPreallocated(mat,1);

2376:   PetscLogEventBegin(MAT_MultHermitianTranspose,mat,x,y,0);
2377:   if (mat->ops->multhermitiantranspose) {
2378:     VecLockPush(x);
2379:     (*mat->ops->multhermitiantranspose)(mat,x,y);
2380:     VecLockPop(x);
2381:   } else {
2382:     VecDuplicate(x,&w);
2383:     VecCopy(x,w);
2384:     VecConjugate(w);
2385:     MatMultTranspose(mat,w,y);
2386:     VecDestroy(&w);
2387:     VecConjugate(y);
2388:   }
2389:   PetscLogEventEnd(MAT_MultHermitianTranspose,mat,x,y,0);
2390:   PetscObjectStateIncrease((PetscObject)y);
2391:   return(0);
2392: }

2396: /*@
2397:     MatMultAdd -  Computes v3 = v2 + A * v1.

2399:     Neighbor-wise Collective on Mat and Vec

2401:     Input Parameters:
2402: +   mat - the matrix
2403: -   v1, v2 - the vectors

2405:     Output Parameters:
2406: .   v3 - the result

2408:     Notes:
2409:     The vectors v1 and v3 cannot be the same.  I.e., one cannot
2410:     call MatMultAdd(A,v1,v2,v1).

2412:     Level: beginner

2414:     Concepts: matrix vector product^addition

2416: .seealso: MatMultTranspose(), MatMult(), MatMultTransposeAdd()
2417: @*/
2418: PetscErrorCode MatMultAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2419: {


2429:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2430:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2431:   if (mat->cmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->cmap->N,v1->map->N);
2432:   /* if (mat->rmap->N != v2->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->rmap->N,v2->map->N);
2433:      if (mat->rmap->N != v3->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->rmap->N,v3->map->N); */
2434:   if (mat->rmap->n != v3->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: local dim %D %D",mat->rmap->n,v3->map->n);
2435:   if (mat->rmap->n != v2->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: local dim %D %D",mat->rmap->n,v2->map->n);
2436:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2437:   MatCheckPreallocated(mat,1);

2439:   if (!mat->ops->multadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No MatMultAdd() for matrix type '%s'",((PetscObject)mat)->type_name);
2440:   PetscLogEventBegin(MAT_MultAdd,mat,v1,v2,v3);
2441:   VecLockPush(v1);
2442:   (*mat->ops->multadd)(mat,v1,v2,v3);
2443:   VecLockPop(v1);
2444:   PetscLogEventEnd(MAT_MultAdd,mat,v1,v2,v3);
2445:   PetscObjectStateIncrease((PetscObject)v3);
2446:   return(0);
2447: }

2451: /*@
2452:    MatMultTransposeAdd - Computes v3 = v2 + A' * v1.

2454:    Neighbor-wise Collective on Mat and Vec

2456:    Input Parameters:
2457: +  mat - the matrix
2458: -  v1, v2 - the vectors

2460:    Output Parameters:
2461: .  v3 - the result

2463:    Notes:
2464:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2465:    call MatMultTransposeAdd(A,v1,v2,v1).

2467:    Level: beginner

2469:    Concepts: matrix vector product^transpose and addition

2471: .seealso: MatMultTranspose(), MatMultAdd(), MatMult()
2472: @*/
2473: PetscErrorCode MatMultTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2474: {


2484:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2485:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2486:   if (!mat->ops->multtransposeadd) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2487:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2488:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2489:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2490:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2491:   MatCheckPreallocated(mat,1);

2493:   PetscLogEventBegin(MAT_MultTransposeAdd,mat,v1,v2,v3);
2494:   VecLockPush(v1);
2495:   (*mat->ops->multtransposeadd)(mat,v1,v2,v3);
2496:   VecLockPop(v1);
2497:   PetscLogEventEnd(MAT_MultTransposeAdd,mat,v1,v2,v3);
2498:   PetscObjectStateIncrease((PetscObject)v3);
2499:   return(0);
2500: }

2504: /*@
2505:    MatMultHermitianTransposeAdd - Computes v3 = v2 + A^H * v1.

2507:    Neighbor-wise Collective on Mat and Vec

2509:    Input Parameters:
2510: +  mat - the matrix
2511: -  v1, v2 - the vectors

2513:    Output Parameters:
2514: .  v3 - the result

2516:    Notes:
2517:    The vectors v1 and v3 cannot be the same.  I.e., one cannot
2518:    call MatMultHermitianTransposeAdd(A,v1,v2,v1).

2520:    Level: beginner

2522:    Concepts: matrix vector product^transpose and addition

2524: .seealso: MatMultHermitianTranspose(), MatMultTranspose(), MatMultAdd(), MatMult()
2525: @*/
2526: PetscErrorCode MatMultHermitianTransposeAdd(Mat mat,Vec v1,Vec v2,Vec v3)
2527: {


2537:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2538:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2539:   if (v1 == v3) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"v1 and v3 must be different vectors");
2540:   if (mat->rmap->N != v1->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v1: global dim %D %D",mat->rmap->N,v1->map->N);
2541:   if (mat->cmap->N != v2->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v2: global dim %D %D",mat->cmap->N,v2->map->N);
2542:   if (mat->cmap->N != v3->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec v3: global dim %D %D",mat->cmap->N,v3->map->N);
2543:   MatCheckPreallocated(mat,1);

2545:   PetscLogEventBegin(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2546:   VecLockPush(v1);
2547:   if (mat->ops->multhermitiantransposeadd) {
2548:     (*mat->ops->multhermitiantransposeadd)(mat,v1,v2,v3);
2549:   } else {
2550:     Vec w,z;
2551:     VecDuplicate(v1,&w);
2552:     VecCopy(v1,w);
2553:     VecConjugate(w);
2554:     VecDuplicate(v3,&z);
2555:     MatMultTranspose(mat,w,z);
2556:     VecDestroy(&w);
2557:     VecConjugate(z);
2558:     VecWAXPY(v3,1.0,v2,z);
2559:     VecDestroy(&z);
2560:   }
2561:   VecLockPop(v1);
2562:   PetscLogEventEnd(MAT_MultHermitianTransposeAdd,mat,v1,v2,v3);
2563:   PetscObjectStateIncrease((PetscObject)v3);
2564:   return(0);
2565: }

2569: /*@
2570:    MatMultConstrained - The inner multiplication routine for a
2571:    constrained matrix P^T A P.

2573:    Neighbor-wise Collective on Mat and Vec

2575:    Input Parameters:
2576: +  mat - the matrix
2577: -  x   - the vector to be multiplied

2579:    Output Parameters:
2580: .  y - the result

2582:    Notes:
2583:    The vectors x and y cannot be the same.  I.e., one cannot
2584:    call MatMult(A,y,y).

2586:    Level: beginner

2588: .keywords: matrix, multiply, matrix-vector product, constraint
2589: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2590: @*/
2591: PetscErrorCode MatMultConstrained(Mat mat,Vec x,Vec y)
2592: {

2599:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2600:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2601:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2602:   if (mat->cmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
2603:   if (mat->rmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
2604:   if (mat->rmap->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: local dim %D %D",mat->rmap->n,y->map->n);

2606:   PetscLogEventBegin(MAT_MultConstrained,mat,x,y,0);
2607:   VecLockPush(x);
2608:   (*mat->ops->multconstrained)(mat,x,y);
2609:   VecLockPop(x);
2610:   PetscLogEventEnd(MAT_MultConstrained,mat,x,y,0);
2611:   PetscObjectStateIncrease((PetscObject)y);
2612:   return(0);
2613: }

2617: /*@
2618:    MatMultTransposeConstrained - The inner multiplication routine for a
2619:    constrained matrix P^T A^T P.

2621:    Neighbor-wise Collective on Mat and Vec

2623:    Input Parameters:
2624: +  mat - the matrix
2625: -  x   - the vector to be multiplied

2627:    Output Parameters:
2628: .  y - the result

2630:    Notes:
2631:    The vectors x and y cannot be the same.  I.e., one cannot
2632:    call MatMult(A,y,y).

2634:    Level: beginner

2636: .keywords: matrix, multiply, matrix-vector product, constraint
2637: .seealso: MatMult(), MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
2638: @*/
2639: PetscErrorCode MatMultTransposeConstrained(Mat mat,Vec x,Vec y)
2640: {

2647:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2648:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2649:   if (x == y) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"x and y must be different vectors");
2650:   if (mat->rmap->N != x->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
2651:   if (mat->cmap->N != y->map->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);

2653:   PetscLogEventBegin(MAT_MultTransposeConstrained,mat,x,y,0);
2654:   (*mat->ops->multtransposeconstrained)(mat,x,y);
2655:   PetscLogEventEnd(MAT_MultTransposeConstrained,mat,x,y,0);
2656:   PetscObjectStateIncrease((PetscObject)y);
2657:   return(0);
2658: }

2662: /*@C
2663:    MatGetFactorType - gets the type of factorization of the matrix

2665:    Not Collective

2668:    Input Parameters:
2669: .  mat - the matrix

2671:    Output Parameters:
2672: .  t - the type, one of MAT_FACTOR_NONE, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ILU, MAT_FACTOR_ICC,MAT_FACTOR_ILUDT

2674:     Level: intermediate

2676: .seealso:    MatFactorType, MatGetFactor()
2677: @*/
2678: PetscErrorCode MatGetFactorType(Mat mat,MatFactorType *t)
2679: {
2683:   *t = mat->factortype;
2684:   return(0);
2685: }

2687: /* ------------------------------------------------------------*/
2690: /*@C
2691:    MatGetInfo - Returns information about matrix storage (number of
2692:    nonzeros, memory, etc.).

2694:    Collective on Mat if MAT_GLOBAL_MAX or MAT_GLOBAL_SUM is used as the flag

2696:    Input Parameters:
2697: +  mat - the matrix
2698: -  flag - flag indicating the type of parameters to be returned
2699:    (MAT_LOCAL - local matrix, MAT_GLOBAL_MAX - maximum over all processors,
2700:    MAT_GLOBAL_SUM - sum over all processors)

2702:    Output Parameter:
2703: .  info - matrix information context

2705:    Notes:
2706:    The MatInfo context contains a variety of matrix data, including
2707:    number of nonzeros allocated and used, number of mallocs during
2708:    matrix assembly, etc.  Additional information for factored matrices
2709:    is provided (such as the fill ratio, number of mallocs during
2710:    factorization, etc.).  Much of this info is printed to PETSC_STDOUT
2711:    when using the runtime options
2712: $       -info -mat_view ::ascii_info

2714:    Example for C/C++ Users:
2715:    See the file ${PETSC_DIR}/include/petscmat.h for a complete list of
2716:    data within the MatInfo context.  For example,
2717: .vb
2718:       MatInfo info;
2719:       Mat     A;
2720:       double  mal, nz_a, nz_u;

2722:       MatGetInfo(A,MAT_LOCAL,&info);
2723:       mal  = info.mallocs;
2724:       nz_a = info.nz_allocated;
2725: .ve

2727:    Example for Fortran Users:
2728:    Fortran users should declare info as a double precision
2729:    array of dimension MAT_INFO_SIZE, and then extract the parameters
2730:    of interest.  See the file ${PETSC_DIR}/include/petsc/finclude/petscmat.h
2731:    for a complete list of parameter names.
2732: .vb
2733:       double  precision info(MAT_INFO_SIZE)
2734:       double  precision mal, nz_a
2735:       Mat     A
2736:       integer ierr

2738:       call MatGetInfo(A,MAT_LOCAL,info,ierr)
2739:       mal = info(MAT_INFO_MALLOCS)
2740:       nz_a = info(MAT_INFO_NZ_ALLOCATED)
2741: .ve

2743:     Level: intermediate

2745:     Concepts: matrices^getting information on

2747:     Developer Note: fortran interface is not autogenerated as the f90
2748:    interface definition cannot be generated correctly [due to MatInfo]

2750: .seealso: MatStashGetInfo()

2752: @*/
2753: PetscErrorCode MatGetInfo(Mat mat,MatInfoType flag,MatInfo *info)
2754: {

2761:   if (!mat->ops->getinfo) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2762:   MatCheckPreallocated(mat,1);
2763:   (*mat->ops->getinfo)(mat,flag,info);
2764:   return(0);
2765: }

2767: /* ----------------------------------------------------------*/

2771: /*@C
2772:    MatLUFactor - Performs in-place LU factorization of matrix.

2774:    Collective on Mat

2776:    Input Parameters:
2777: +  mat - the matrix
2778: .  row - row permutation
2779: .  col - column permutation
2780: -  info - options for factorization, includes
2781: $          fill - expected fill as ratio of original fill.
2782: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2783: $                   Run with the option -info to determine an optimal value to use

2785:    Notes:
2786:    Most users should employ the simplified KSP interface for linear solvers
2787:    instead of working directly with matrix algebra routines such as this.
2788:    See, e.g., KSPCreate().

2790:    This changes the state of the matrix to a factored matrix; it cannot be used
2791:    for example with MatSetValues() unless one first calls MatSetUnfactored().

2793:    Level: developer

2795:    Concepts: matrices^LU factorization

2797: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(),
2798:           MatGetOrdering(), MatSetUnfactored(), MatFactorInfo, MatGetFactor()

2800:     Developer Note: fortran interface is not autogenerated as the f90
2801:    interface definition cannot be generated correctly [due to MatFactorInfo]

2803: @*/
2804: PetscErrorCode MatLUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2805: {
2807:   MatFactorInfo  tinfo;

2815:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2816:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2817:   if (!mat->ops->lufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2818:   MatCheckPreallocated(mat,1);
2819:   if (!info) {
2820:     MatFactorInfoInitialize(&tinfo);
2821:     info = &tinfo;
2822:   }

2824:   PetscLogEventBegin(MAT_LUFactor,mat,row,col,0);
2825:   (*mat->ops->lufactor)(mat,row,col,info);
2826:   PetscLogEventEnd(MAT_LUFactor,mat,row,col,0);
2827:   PetscObjectStateIncrease((PetscObject)mat);
2828:   return(0);
2829: }

2833: /*@C
2834:    MatILUFactor - Performs in-place ILU factorization of matrix.

2836:    Collective on Mat

2838:    Input Parameters:
2839: +  mat - the matrix
2840: .  row - row permutation
2841: .  col - column permutation
2842: -  info - structure containing
2843: $      levels - number of levels of fill.
2844: $      expected fill - as ratio of original fill.
2845: $      1 or 0 - indicating force fill on diagonal (improves robustness for matrices
2846:                 missing diagonal entries)

2848:    Notes:
2849:    Probably really in-place only when level of fill is zero, otherwise allocates
2850:    new space to store factored matrix and deletes previous memory.

2852:    Most users should employ the simplified KSP interface for linear solvers
2853:    instead of working directly with matrix algebra routines such as this.
2854:    See, e.g., KSPCreate().

2856:    Level: developer

2858:    Concepts: matrices^ILU factorization

2860: .seealso: MatILUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

2862:     Developer Note: fortran interface is not autogenerated as the f90
2863:    interface definition cannot be generated correctly [due to MatFactorInfo]

2865: @*/
2866: PetscErrorCode MatILUFactor(Mat mat,IS row,IS col,const MatFactorInfo *info)
2867: {

2876:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
2877:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2878:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2879:   if (!mat->ops->ilufactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
2880:   MatCheckPreallocated(mat,1);

2882:   PetscLogEventBegin(MAT_ILUFactor,mat,row,col,0);
2883:   (*mat->ops->ilufactor)(mat,row,col,info);
2884:   PetscLogEventEnd(MAT_ILUFactor,mat,row,col,0);
2885:   PetscObjectStateIncrease((PetscObject)mat);
2886:   return(0);
2887: }

2891: /*@C
2892:    MatLUFactorSymbolic - Performs symbolic LU factorization of matrix.
2893:    Call this routine before calling MatLUFactorNumeric().

2895:    Collective on Mat

2897:    Input Parameters:
2898: +  fact - the factor matrix obtained with MatGetFactor()
2899: .  mat - the matrix
2900: .  row, col - row and column permutations
2901: -  info - options for factorization, includes
2902: $          fill - expected fill as ratio of original fill.
2903: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
2904: $                   Run with the option -info to determine an optimal value to use


2907:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

2909:    Most users should employ the simplified KSP interface for linear solvers
2910:    instead of working directly with matrix algebra routines such as this.
2911:    See, e.g., KSPCreate().

2913:    Level: developer

2915:    Concepts: matrices^LU symbolic factorization

2917: .seealso: MatLUFactor(), MatLUFactorNumeric(), MatCholeskyFactor(), MatFactorInfo, MatFactorInfoInitialize()

2919:     Developer Note: fortran interface is not autogenerated as the f90
2920:    interface definition cannot be generated correctly [due to MatFactorInfo]

2922: @*/
2923: PetscErrorCode MatLUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
2924: {

2934:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2935:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
2936:   if (!(fact)->ops->lufactorsymbolic) {
2937:     const MatSolverPackage spackage;
2938:     MatFactorGetSolverPackage(fact,&spackage);
2939:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic LU using solver package %s",((PetscObject)mat)->type_name,spackage);
2940:   }
2941:   MatCheckPreallocated(mat,2);

2943:   PetscLogEventBegin(MAT_LUFactorSymbolic,mat,row,col,0);
2944:   (fact->ops->lufactorsymbolic)(fact,mat,row,col,info);
2945:   PetscLogEventEnd(MAT_LUFactorSymbolic,mat,row,col,0);
2946:   PetscObjectStateIncrease((PetscObject)fact);
2947:   return(0);
2948: }

2952: /*@C
2953:    MatLUFactorNumeric - Performs numeric LU factorization of a matrix.
2954:    Call this routine after first calling MatLUFactorSymbolic().

2956:    Collective on Mat

2958:    Input Parameters:
2959: +  fact - the factor matrix obtained with MatGetFactor()
2960: .  mat - the matrix
2961: -  info - options for factorization

2963:    Notes:
2964:    See MatLUFactor() for in-place factorization.  See
2965:    MatCholeskyFactorNumeric() for the symmetric, positive definite case.

2967:    Most users should employ the simplified KSP interface for linear solvers
2968:    instead of working directly with matrix algebra routines such as this.
2969:    See, e.g., KSPCreate().

2971:    Level: developer

2973:    Concepts: matrices^LU numeric factorization

2975: .seealso: MatLUFactorSymbolic(), MatLUFactor(), MatCholeskyFactor()

2977:     Developer Note: fortran interface is not autogenerated as the f90
2978:    interface definition cannot be generated correctly [due to MatFactorInfo]

2980: @*/
2981: PetscErrorCode MatLUFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
2982: {

2990:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
2991:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dimensions are different %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);

2993:   if (!(fact)->ops->lufactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric LU",((PetscObject)mat)->type_name);
2994:   MatCheckPreallocated(mat,2);
2995:   PetscLogEventBegin(MAT_LUFactorNumeric,mat,fact,0,0);
2996:   (fact->ops->lufactornumeric)(fact,mat,info);
2997:   PetscLogEventEnd(MAT_LUFactorNumeric,mat,fact,0,0);
2998:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
2999:   PetscObjectStateIncrease((PetscObject)fact);
3000:   return(0);
3001: }

3005: /*@C
3006:    MatCholeskyFactor - Performs in-place Cholesky factorization of a
3007:    symmetric matrix.

3009:    Collective on Mat

3011:    Input Parameters:
3012: +  mat - the matrix
3013: .  perm - row and column permutations
3014: -  info - options for factorization, including the expected fill as ratio of original fill

3016:    Notes:
3017:    See MatLUFactor() for the nonsymmetric case.  See also
3018:    MatCholeskyFactorSymbolic(), and MatCholeskyFactorNumeric().

3020:    Most users should employ the simplified KSP interface for linear solvers
3021:    instead of working directly with matrix algebra routines such as this.
3022:    See, e.g., KSPCreate().

3024:    Level: developer

3026:    Concepts: matrices^Cholesky factorization

3028: .seealso: MatLUFactor(), MatCholeskyFactorSymbolic(), MatCholeskyFactorNumeric()
3029:           MatGetOrdering()

3031:     Developer Note: fortran interface is not autogenerated as the f90
3032:    interface definition cannot be generated correctly [due to MatFactorInfo]

3034: @*/
3035: PetscErrorCode MatCholeskyFactor(Mat mat,IS perm,const MatFactorInfo *info)
3036: {

3044:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3045:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3046:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3047:   if (!mat->ops->choleskyfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3048:   MatCheckPreallocated(mat,1);

3050:   PetscLogEventBegin(MAT_CholeskyFactor,mat,perm,0,0);
3051:   (*mat->ops->choleskyfactor)(mat,perm,info);
3052:   PetscLogEventEnd(MAT_CholeskyFactor,mat,perm,0,0);
3053:   PetscObjectStateIncrease((PetscObject)mat);
3054:   return(0);
3055: }

3059: /*@C
3060:    MatCholeskyFactorSymbolic - Performs symbolic Cholesky factorization
3061:    of a symmetric matrix.

3063:    Collective on Mat

3065:    Input Parameters:
3066: +  fact - the factor matrix obtained with MatGetFactor()
3067: .  mat - the matrix
3068: .  perm - row and column permutations
3069: -  info - options for factorization, includes
3070: $          fill - expected fill as ratio of original fill.
3071: $          dtcol - pivot tolerance (0 no pivot, 1 full column pivoting)
3072: $                   Run with the option -info to determine an optimal value to use

3074:    Notes:
3075:    See MatLUFactorSymbolic() for the nonsymmetric case.  See also
3076:    MatCholeskyFactor() and MatCholeskyFactorNumeric().

3078:    Most users should employ the simplified KSP interface for linear solvers
3079:    instead of working directly with matrix algebra routines such as this.
3080:    See, e.g., KSPCreate().

3082:    Level: developer

3084:    Concepts: matrices^Cholesky symbolic factorization

3086: .seealso: MatLUFactorSymbolic(), MatCholeskyFactor(), MatCholeskyFactorNumeric()
3087:           MatGetOrdering()

3089:     Developer Note: fortran interface is not autogenerated as the f90
3090:    interface definition cannot be generated correctly [due to MatFactorInfo]

3092: @*/
3093: PetscErrorCode MatCholeskyFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
3094: {

3103:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"Matrix must be square");
3104:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3105:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3106:   if (!(fact)->ops->choleskyfactorsymbolic) {
3107:     const MatSolverPackage spackage;
3108:     MatFactorGetSolverPackage(fact,&spackage);
3109:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s symbolic factor Cholesky using solver package %s",((PetscObject)mat)->type_name,spackage);
3110:   }
3111:   MatCheckPreallocated(mat,2);

3113:   PetscLogEventBegin(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3114:   (fact->ops->choleskyfactorsymbolic)(fact,mat,perm,info);
3115:   PetscLogEventEnd(MAT_CholeskyFactorSymbolic,mat,perm,0,0);
3116:   PetscObjectStateIncrease((PetscObject)fact);
3117:   return(0);
3118: }

3122: /*@C
3123:    MatCholeskyFactorNumeric - Performs numeric Cholesky factorization
3124:    of a symmetric matrix. Call this routine after first calling
3125:    MatCholeskyFactorSymbolic().

3127:    Collective on Mat

3129:    Input Parameters:
3130: +  fact - the factor matrix, obtained with MatGetFactor(), holding the symbolic factor of mat
3131: .  mat - the initial matrix
3132: -  info - options for factorization


3136:    Notes:
3137:    Most users should employ the simplified KSP interface for linear solvers
3138:    instead of working directly with matrix algebra routines such as this.
3139:    See, e.g., KSPCreate().

3141:    Level: developer

3143:    Concepts: matrices^Cholesky numeric factorization

3145: .seealso: MatCholeskyFactorSymbolic(), MatCholeskyFactor(), MatLUFactorNumeric()

3147:     Developer Note: fortran interface is not autogenerated as the f90
3148:    interface definition cannot be generated correctly [due to MatFactorInfo]

3150: @*/
3151: PetscErrorCode MatCholeskyFactorNumeric(Mat fact,Mat mat,const MatFactorInfo *info)
3152: {

3160:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3161:   if (!(fact)->ops->choleskyfactornumeric) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s numeric factor Cholesky",((PetscObject)mat)->type_name);
3162:   if (mat->rmap->N != (fact)->rmap->N || mat->cmap->N != (fact)->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Mat fact: global dim %D should = %D %D should = %D",mat->rmap->N,(fact)->rmap->N,mat->cmap->N,(fact)->cmap->N);
3163:   MatCheckPreallocated(mat,2);

3165:   PetscLogEventBegin(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3166:   (fact->ops->choleskyfactornumeric)(fact,mat,info);
3167:   PetscLogEventEnd(MAT_CholeskyFactorNumeric,mat,fact,0,0);
3168:   MatViewFromOptions(fact,NULL,"-mat_factor_view");
3169:   PetscObjectStateIncrease((PetscObject)fact);
3170:   return(0);
3171: }

3173: /* ----------------------------------------------------------------*/
3176: /*@
3177:    MatSolve - Solves A x = b, given a factored matrix.

3179:    Neighbor-wise Collective on Mat and Vec

3181:    Input Parameters:
3182: +  mat - the factored matrix
3183: -  b - the right-hand-side vector

3185:    Output Parameter:
3186: .  x - the result vector

3188:    Notes:
3189:    The vectors b and x cannot be the same.  I.e., one cannot
3190:    call MatSolve(A,x,x).

3193:    Most users should employ the simplified KSP interface for linear solvers
3194:    instead of working directly with matrix algebra routines such as this.
3195:    See, e.g., KSPCreate().
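
   Example (a minimal sketch of the out-of-place factorization workflow; the original matrix A, the vectors b and x, the solver package, and the ordering type are all illustrative choices):
.vb
      Mat           F;
      IS            row,col;
      MatFactorInfo info;
      MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
      MatGetOrdering(A,MATORDERINGND,&row,&col);
      MatFactorInfoInitialize(&info);
      MatLUFactorSymbolic(F,A,row,col,&info);
      MatLUFactorNumeric(F,A,&info);
      MatSolve(F,b,x);
.ve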

3197:    Level: developer

3199:    Concepts: matrices^triangular solves

3201: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd()
3202: @*/
3203: PetscErrorCode MatSolve(Mat mat,Vec b,Vec x)
3204: {

3214:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3215:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3216:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3217:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3218:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3219:   if (!mat->rmap->N && !mat->cmap->N) return(0);
3220:   if (!mat->ops->solve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3221:   MatCheckPreallocated(mat,1);

3223:   PetscLogEventBegin(MAT_Solve,mat,b,x,0);
3224:   if (mat->errortype) {
3225:     PetscInfo1(mat,"MatFactorError %D\n",mat->errortype);
3226:     VecSetInf(x);
3227:   } else {
3228:     (*mat->ops->solve)(mat,b,x);
3229:   }
3230:   PetscLogEventEnd(MAT_Solve,mat,b,x,0);
3231:   PetscObjectStateIncrease((PetscObject)x);
3232:   return(0);
3233: }
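/*
   Illustrative sketch (not part of PETSc itself): using MatSolve() with a previously
   factored matrix F (for example the Cholesky factor sketched above); the vector
   layouts are taken from the original matrix A.

     Vec b,x;

     MatCreateVecs(A,&x,&b);
     ... set the entries of b ...
     MatSolve(F,b,x);
*/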

3237: PetscErrorCode MatMatSolve_Basic(Mat A,Mat B,Mat X)
3238: {
3240:   Vec            b,x;
3241:   PetscInt       m,N,i;
3242:   PetscScalar    *bb,*xx;
3243:   PetscBool      flg;

3246:   PetscObjectTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3247:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
3248:   PetscObjectTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,NULL);
3249:   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");

3251:   MatDenseGetArray(B,&bb);
3252:   MatDenseGetArray(X,&xx);
3253:   MatGetLocalSize(B,&m,NULL);  /* number local rows */
3254:   MatGetSize(B,NULL,&N);       /* total columns in dense matrix */
3255:   MatCreateVecs(A,&x,&b);
3256:   for (i=0; i<N; i++) {
3257:     VecPlaceArray(b,bb + i*m);
3258:     VecPlaceArray(x,xx + i*m);
3259:     MatSolve(A,b,x);
3260:     VecResetArray(x);
3261:     VecResetArray(b);
3262:   }
3263:   VecDestroy(&b);
3264:   VecDestroy(&x);
3265:   MatDenseRestoreArray(B,&bb);
3266:   MatDenseRestoreArray(X,&xx);
3267:   return(0);
3268: }

3272: /*@
3273:    MatMatSolve - Solves A X = B, given a factored matrix.

3275:    Neighbor-wise Collective on Mat

3277:    Input Parameters:
3278: +  A - the factored matrix
3279: -  B - the right-hand-side matrix  (dense matrix)

3281:    Output Parameter:
3282: .  X - the result matrix (dense matrix)

3284:    Notes:
3285:    The matrices B and X cannot be the same.  I.e., one cannot
3286:    call MatMatSolve(A,X,X).

3289:    Most users should employ the simplified KSP interface for linear solvers
3290:    instead of working directly with matrix algebra routines such as this.
3291:    See, e.g., KSPCreate(). However KSP can only solve for one vector (column of X)
3292:    at a time.

3294:    When using SuperLU_Dist as a parallel solver PETSc will use the SuperLU_Dist functionality to solve multiple right hand sides simultaneously. For MUMPS
3295:    it calls a separate solve for each right hand side since MUMPS does not yet support distributed right hand sides.

3297:    Since the resulting matrix X must always be dense we do not support sparse representation of the matrix B.

3299:    Level: developer

3301:    Concepts: matrices^triangular solves

3303: .seealso: MatMatSolveAdd(), MatMatSolveTranspose(), MatMatSolveTransposeAdd(), MatLUFactor(), MatCholeskyFactor()
3304: @*/
3305: PetscErrorCode MatMatSolve(Mat A,Mat B,Mat X)
3306: {

3316:   if (X == B) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_IDN,"X and B must be different matrices");
3317:   if (!A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3318:   if (A->cmap->N != X->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat X: global dim %D %D",A->cmap->N,X->rmap->N);
3319:   if (A->rmap->N != B->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D",A->rmap->N,B->rmap->N);
3320:   if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat A,Mat B: local dim %D %D",A->rmap->n,B->rmap->n);
3321:   if (X->cmap->N < B->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Solution matrix must have same number of columns as rhs matrix");
3322:   if (!A->rmap->N && !A->cmap->N) return(0);
3323:   MatCheckPreallocated(A,1);

3325:   PetscLogEventBegin(MAT_MatSolve,A,B,X,0);
3326:   if (!A->ops->matsolve) {
3327:     PetscInfo1(A,"Mat type %s using basic MatMatSolve\n",((PetscObject)A)->type_name);
3328:     MatMatSolve_Basic(A,B,X);
3329:   } else {
3330:     (*A->ops->matsolve)(A,B,X);
3331:   }
3332:   PetscLogEventEnd(MAT_MatSolve,A,B,X,0);
3333:   PetscObjectStateIncrease((PetscObject)X);
3334:   return(0);
3335: }
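/*
   Illustrative sketch (not part of PETSc itself): solving for nrhs right-hand sides at
   once with MatMatSolve(), assuming F is a factored matrix obtained with MatGetFactor()
   and the factorization routines above.

     Mat      B,X;
     PetscInt m,M;

     MatGetLocalSize(F,&m,NULL);
     MatGetSize(F,&M,NULL);
     MatCreateDense(PetscObjectComm((PetscObject)F),m,PETSC_DECIDE,M,nrhs,NULL,&B);
     MatDuplicate(B,MAT_DO_NOT_COPY_VALUES,&X);
     ... fill B with the right-hand sides and assemble it ...
     MatMatSolve(F,B,X);
*/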


3340: /*@
3341:    MatForwardSolve - Solves L x = b, given a factored matrix, A = LU, or
3342:                             U^T*D^(1/2) x = b, given a factored symmetric matrix, A = U^T*D*U.

3344:    Neighbor-wise Collective on Mat and Vec

3346:    Input Parameters:
3347: +  mat - the factored matrix
3348: -  b - the right-hand-side vector

3350:    Output Parameter:
3351: .  x - the result vector

3353:    Notes:
3354:    MatSolve() should be used for most applications, as it performs
3355:    a forward solve followed by a backward solve.

3357:    The vectors b and x cannot be the same,  i.e., one cannot
3358:    call MatForwardSolve(A,x,x).

3360:    For matrix in seqsbaij format with block size larger than 1,
3361:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3362:    MatForwardSolve() solves U^T*D y = b, and
3363:    MatBackwardSolve() solves U x = y.
3364:    Thus they do not provide a symmetric preconditioner.

3366:    Most users should employ the simplified KSP interface for linear solvers
3367:    instead of working directly with matrix algebra routines such as this.
3368:    See, e.g., KSPCreate().

3370:    Level: developer

3372:    Concepts: matrices^forward solves

3374: .seealso: MatSolve(), MatBackwardSolve()
3375: @*/
3376: PetscErrorCode MatForwardSolve(Mat mat,Vec b,Vec x)
3377: {

3387:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3388:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3389:   if (!mat->ops->forwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3390:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3391:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3392:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3393:   MatCheckPreallocated(mat,1);
3394:   PetscLogEventBegin(MAT_ForwardSolve,mat,b,x,0);
3395:   (*mat->ops->forwardsolve)(mat,b,x);
3396:   PetscLogEventEnd(MAT_ForwardSolve,mat,b,x,0);
3397:   PetscObjectStateIncrease((PetscObject)x);
3398:   return(0);
3399: }

3403: /*@
3404:    MatBackwardSolve - Solves U x = b, given a factored matrix, A = LU, or
3405:                              D^(1/2) U x = b, given a factored symmetric matrix, A = U^T*D*U.

3407:    Neighbor-wise Collective on Mat and Vec

3409:    Input Parameters:
3410: +  mat - the factored matrix
3411: -  b - the right-hand-side vector

3413:    Output Parameter:
3414: .  x - the result vector

3416:    Notes:
3417:    MatSolve() should be used for most applications, as it performs
3418:    a forward solve followed by a backward solve.

3420:    The vectors b and x cannot be the same.  I.e., one cannot
3421:    call MatBackwardSolve(A,x,x).

3423:    For matrix in seqsbaij format with block size larger than 1,
3424:    the diagonal blocks are not implemented as D = D^(1/2) * D^(1/2) yet.
3425:    MatForwardSolve() solves U^T*D y = b, and
3426:    MatBackwardSolve() solves U x = y.
3427:    Thus they do not provide a symmetric preconditioner.

3429:    Most users should employ the simplified KSP interface for linear solvers
3430:    instead of working directly with matrix algebra routines such as this.
3431:    See, e.g., KSPCreate().

3433:    Level: developer

3435:    Concepts: matrices^backward solves

3437: .seealso: MatSolve(), MatForwardSolve()
3438: @*/
3439: PetscErrorCode MatBackwardSolve(Mat mat,Vec b,Vec x)
3440: {

3450:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3451:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3452:   if (!mat->ops->backwardsolve) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3453:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3454:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3455:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3456:   MatCheckPreallocated(mat,1);

3458:   PetscLogEventBegin(MAT_BackwardSolve,mat,b,x,0);
3459:   (*mat->ops->backwardsolve)(mat,b,x);
3460:   PetscLogEventEnd(MAT_BackwardSolve,mat,b,x,0);
3461:   PetscObjectStateIncrease((PetscObject)x);
3462:   return(0);
3463: }
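/*
   Illustrative sketch (not part of PETSc itself): for an LU factorization the two half
   solves below are together equivalent to one MatSolve() call; y is a work vector with
   the same layout as x, and the matrix type must implement both operations.

     MatForwardSolve(F,b,y);      solves L y = b
     MatBackwardSolve(F,y,x);     solves U x = y
*/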

3467: /*@
3468:    MatSolveAdd - Computes x = y + inv(A)*b, given a factored matrix.

3470:    Neighbor-wise Collective on Mat and Vec

3472:    Input Parameters:
3473: +  mat - the factored matrix
3474: .  b - the right-hand-side vector
3475: -  y - the vector to be added to

3477:    Output Parameter:
3478: .  x - the result vector

3480:    Notes:
3481:    The vectors b and x cannot be the same.  I.e., one cannot
3482:    call MatSolveAdd(A,x,y,x).

3484:    Most users should employ the simplified KSP interface for linear solvers
3485:    instead of working directly with matrix algebra routines such as this.
3486:    See, e.g., KSPCreate().

3488:    Level: developer

3490:    Concepts: matrices^triangular solves

3492: .seealso: MatSolve(), MatSolveTranspose(), MatSolveTransposeAdd()
3493: @*/
3494: PetscErrorCode MatSolveAdd(Mat mat,Vec b,Vec y,Vec x)
3495: {
3496:   PetscScalar    one = 1.0;
3497:   Vec            tmp;

3509:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3510:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3511:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3512:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3513:   if (mat->rmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->rmap->N,y->map->N);
3514:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3515:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3516:   MatCheckPreallocated(mat,1);

3518:   PetscLogEventBegin(MAT_SolveAdd,mat,b,x,y);
3519:   if (mat->ops->solveadd) {
3520:     (*mat->ops->solveadd)(mat,b,y,x);
3521:   } else {
3522:     /* do the solve then the add manually */
3523:     if (x != y) {
3524:       MatSolve(mat,b,x);
3525:       VecAXPY(x,one,y);
3526:     } else {
3527:       VecDuplicate(x,&tmp);
3528:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3529:       VecCopy(x,tmp);
3530:       MatSolve(mat,b,x);
3531:       VecAXPY(x,one,tmp);
3532:       VecDestroy(&tmp);
3533:     }
3534:   }
3535:   PetscLogEventEnd(MAT_SolveAdd,mat,b,x,y);
3536:   PetscObjectStateIncrease((PetscObject)x);
3537:   return(0);
3538: }

3542: /*@
3543:    MatSolveTranspose - Solves A' x = b, given a factored matrix.

3545:    Neighbor-wise Collective on Mat and Vec

3547:    Input Parameters:
3548: +  mat - the factored matrix
3549: -  b - the right-hand-side vector

3551:    Output Parameter:
3552: .  x - the result vector

3554:    Notes:
3555:    The vectors b and x cannot be the same.  I.e., one cannot
3556:    call MatSolveTranspose(A,x,x).

3558:    Most users should employ the simplified KSP interface for linear solvers
3559:    instead of working directly with matrix algebra routines such as this.
3560:    See, e.g., KSPCreate().

3562:    Level: developer

3564:    Concepts: matrices^triangular solves

3566: .seealso: MatSolve(), MatSolveAdd(), MatSolveTransposeAdd()
3567: @*/
3568: PetscErrorCode MatSolveTranspose(Mat mat,Vec b,Vec x)
3569: {

3579:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3580:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3581:   if (!mat->ops->solvetranspose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s",((PetscObject)mat)->type_name);
3582:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3583:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3584:   MatCheckPreallocated(mat,1);
3585:   PetscLogEventBegin(MAT_SolveTranspose,mat,b,x,0);
3586:   if (mat->errortype) {
3587:     PetscInfo1(mat,"MatFactorError %D\n",mat->errortype);
3588:     VecSetInf(x);
3589:   } else {
3590:     (*mat->ops->solvetranspose)(mat,b,x);
3591:   }
3592:   PetscLogEventEnd(MAT_SolveTranspose,mat,b,x,0);
3593:   PetscObjectStateIncrease((PetscObject)x);
3594:   return(0);
3595: }

3599: /*@
3600:    MatSolveTransposeAdd - Computes x = y + inv(Transpose(A)) b, given a
3601:                       factored matrix.

3603:    Neighbor-wise Collective on Mat and Vec

3605:    Input Parameters:
3606: +  mat - the factored matrix
3607: .  b - the right-hand-side vector
3608: -  y - the vector to be added to

3610:    Output Parameter:
3611: .  x - the result vector

3613:    Notes:
3614:    The vectors b and x cannot be the same.  I.e., one cannot
3615:    call MatSolveTransposeAdd(A,x,y,x).

3617:    Most users should employ the simplified KSP interface for linear solvers
3618:    instead of working directly with matrix algebra routines such as this.
3619:    See, e.g., KSPCreate().

3621:    Level: developer

3623:    Concepts: matrices^triangular solves

3625: .seealso: MatSolve(), MatSolveAdd(), MatSolveTranspose()
3626: @*/
3627: PetscErrorCode MatSolveTransposeAdd(Mat mat,Vec b,Vec y,Vec x)
3628: {
3629:   PetscScalar    one = 1.0;
3631:   Vec            tmp;

3642:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
3643:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
3644:   if (mat->rmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->rmap->N,x->map->N);
3645:   if (mat->cmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->cmap->N,b->map->N);
3646:   if (mat->cmap->N != y->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec y: global dim %D %D",mat->cmap->N,y->map->N);
3647:   if (x->map->n != y->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vec x,Vec y: local dim %D %D",x->map->n,y->map->n);
3648:   MatCheckPreallocated(mat,1);

3650:   PetscLogEventBegin(MAT_SolveTransposeAdd,mat,b,x,y);
3651:   if (mat->ops->solvetransposeadd) {
3652:     if (mat->errortype) {
3653:       PetscInfo1(mat,"MatFactorError %D\n",mat->errortype);
3654:       VecSetInf(x);
3655:     } else {
3656:       (*mat->ops->solvetransposeadd)(mat,b,y,x);
3657:     }
3658:   } else {
3659:     /* do the solve then the add manually */
3660:     if (x != y) {
3661:       MatSolveTranspose(mat,b,x);
3662:       VecAXPY(x,one,y);
3663:     } else {
3664:       VecDuplicate(x,&tmp);
3665:       PetscLogObjectParent((PetscObject)mat,(PetscObject)tmp);
3666:       VecCopy(x,tmp);
3667:       MatSolveTranspose(mat,b,x);
3668:       VecAXPY(x,one,tmp);
3669:       VecDestroy(&tmp);
3670:     }
3671:   }
3672:   PetscLogEventEnd(MAT_SolveTransposeAdd,mat,b,x,y);
3673:   PetscObjectStateIncrease((PetscObject)x);
3674:   return(0);
3675: }
3676: /* ----------------------------------------------------------------*/

3680: /*@
3681:    MatSOR - Computes relaxation (SOR, Gauss-Seidel) sweeps.

3683:    Neighbor-wise Collective on Mat and Vec

3685:    Input Parameters:
3686: +  mat - the matrix
3687: .  b - the right hand side
3688: .  omega - the relaxation factor
3689: .  flag - flag indicating the type of SOR (see below)
3690: .  shift -  diagonal shift
3691: .  its - the number of iterations
3692: -  lits - the number of local iterations

3694:    Output Parameters:
3695: .  x - the solution (can contain an initial guess, use option SOR_ZERO_INITIAL_GUESS to indicate no guess)

3697:    SOR Flags:
3698: .     SOR_FORWARD_SWEEP - forward SOR
3699: .     SOR_BACKWARD_SWEEP - backward SOR
3700: .     SOR_SYMMETRIC_SWEEP - SSOR (symmetric SOR)
3701: .     SOR_LOCAL_FORWARD_SWEEP - local forward SOR
3702: .     SOR_LOCAL_BACKWARD_SWEEP - local backward SOR
3703: .     SOR_LOCAL_SYMMETRIC_SWEEP - local SSOR
3704: .     SOR_APPLY_UPPER, SOR_APPLY_LOWER - applies
3705:          upper/lower triangular part of matrix to
3706:          vector (with omega)
3707: .     SOR_ZERO_INITIAL_GUESS - zero initial guess

3709:    Notes:
3710:    SOR_LOCAL_FORWARD_SWEEP, SOR_LOCAL_BACKWARD_SWEEP, and
3711:    SOR_LOCAL_SYMMETRIC_SWEEP perform separate independent smoothings
3712:    on each processor.

3714:    Application programmers will not generally use MatSOR() directly,
3715:    but instead will employ the KSP/PC interface.

3717:    Notes: for BAIJ, SBAIJ, and AIJ matrices with Inodes this does a block SOR smoothing; otherwise it does a pointwise smoothing

3719:    Notes for Advanced Users:
3720:    The flags are implemented as bitwise inclusive or operations.
3721:    For example, use (SOR_ZERO_INITIAL_GUESS | SOR_SYMMETRIC_SWEEP)
3722:    to specify a zero initial guess for SSOR.

3724:    Most users should employ the simplified KSP interface for linear solvers
3725:    instead of working directly with matrix algebra routines such as this.
3726:    See, e.g., KSPCreate().

3728:    Vectors x and b CANNOT be the same

3730:    Developer Note: We should add block SOR support for AIJ matrices with block size set to greater than one and no inodes

3732:    Level: developer

3734:    Concepts: matrices^relaxation
3735:    Concepts: matrices^SOR
3736:    Concepts: matrices^Gauss-Seidel

3738: @*/
3739: PetscErrorCode MatSOR(Mat mat,Vec b,PetscReal omega,MatSORType flag,PetscReal shift,PetscInt its,PetscInt lits,Vec x)
3740: {

3750:   if (!mat->ops->sor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
3751:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3752:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3753:   if (mat->cmap->N != x->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec x: global dim %D %D",mat->cmap->N,x->map->N);
3754:   if (mat->rmap->N != b->map->N) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: global dim %D %D",mat->rmap->N,b->map->N);
3755:   if (mat->rmap->n != b->map->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Mat mat,Vec b: local dim %D %D",mat->rmap->n,b->map->n);
3756:   if (its <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D positive",its);
3757:   if (lits <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires local its %D positive",lits);
3758:   if (b == x) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_IDN,"b and x vector cannot be the same");

3760:   MatCheckPreallocated(mat,1);
3761:   PetscLogEventBegin(MAT_SOR,mat,b,x,0);
3762:   (*mat->ops->sor)(mat,b,omega,flag,shift,its,lits,x);
3763:   PetscLogEventEnd(MAT_SOR,mat,b,x,0);
3764:   PetscObjectStateIncrease((PetscObject)x);
3765:   return(0);
3766: }
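/*
   Illustrative sketch (not part of PETSc itself): one local symmetric sweep with a zero
   initial guess, combining the flags with a bitwise or as described in the notes above
   (omega = 1.0, no diagonal shift, one global and one local iteration).

     MatSOR(mat,b,1.0,(MatSORType)(SOR_LOCAL_SYMMETRIC_SWEEP | SOR_ZERO_INITIAL_GUESS),0.0,1,1,x);
*/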

3770: /*
3771:       Default matrix copy routine.
3772: */
3773: PetscErrorCode MatCopy_Basic(Mat A,Mat B,MatStructure str)
3774: {
3775:   PetscErrorCode    ierr;
3776:   PetscInt          i,rstart = 0,rend = 0,nz;
3777:   const PetscInt    *cwork;
3778:   const PetscScalar *vwork;

3781:   if (B->assembled) {
3782:     MatZeroEntries(B);
3783:   }
3784:   MatGetOwnershipRange(A,&rstart,&rend);
3785:   for (i=rstart; i<rend; i++) {
3786:     MatGetRow(A,i,&nz,&cwork,&vwork);
3787:     MatSetValues(B,1,&i,nz,cwork,vwork,INSERT_VALUES);
3788:     MatRestoreRow(A,i,&nz,&cwork,&vwork);
3789:   }
3790:   MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
3791:   MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
3792:   PetscObjectStateIncrease((PetscObject)B);
3793:   return(0);
3794: }

3798: /*@
3799:    MatCopy - Copies a matrix to another matrix.

3801:    Collective on Mat

3803:    Input Parameters:
3804: +  A - the matrix
3805: -  str - SAME_NONZERO_PATTERN or DIFFERENT_NONZERO_PATTERN

3807:    Output Parameter:
3808: .  B - where the copy is put

3810:    Notes:
3811:    If you use SAME_NONZERO_PATTERN then the two matrices must have the same
3812:    nonzero pattern or the routine will crash.

3814:    MatCopy() copies the matrix entries of a matrix to another existing
3815:    matrix (after first zeroing the second matrix).  A related routine is
3816:    MatConvert(), which first creates a new matrix and then copies the data.

3818:    Level: intermediate

3820:    Concepts: matrices^copying

3822: .seealso: MatConvert(), MatDuplicate()

3824: @*/
3825: PetscErrorCode MatCopy(Mat A,Mat B,MatStructure str)
3826: {
3828:   PetscInt       i;

3836:   MatCheckPreallocated(B,2);
3837:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3838:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3839:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim (%D,%D) (%D,%D)",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
3840:   MatCheckPreallocated(A,1);

3842:   PetscLogEventBegin(MAT_Copy,A,B,0,0);
3843:   if (A->ops->copy) {
3844:     (*A->ops->copy)(A,B,str);
3845:   } else { /* generic conversion */
3846:     MatCopy_Basic(A,B,str);
3847:   }

3849:   B->stencil.dim = A->stencil.dim;
3850:   B->stencil.noc = A->stencil.noc;
3851:   for (i=0; i<=A->stencil.dim; i++) {
3852:     B->stencil.dims[i]   = A->stencil.dims[i];
3853:     B->stencil.starts[i] = A->stencil.starts[i];
3854:   }

3856:   PetscLogEventEnd(MAT_Copy,A,B,0,0);
3857:   PetscObjectStateIncrease((PetscObject)B);
3858:   return(0);
3859: }
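/*
   Illustrative sketch (not part of PETSc itself): create B once with the same nonzero
   pattern as A and then copy the values into it, so the copy can use SAME_NONZERO_PATTERN.

     Mat B;

     MatDuplicate(A,MAT_DO_NOT_COPY_VALUES,&B);
     MatCopy(A,B,SAME_NONZERO_PATTERN);
*/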

3863: /*@C
3864:    MatConvert - Converts a matrix to another matrix, either of the same
3865:    or different type.

3867:    Collective on Mat

3869:    Input Parameters:
3870: +  mat - the matrix
3871: .  newtype - new matrix type.  Use MATSAME to create a new matrix of the
3872:    same type as the original matrix.
3873: -  reuse - denotes if the destination matrix is to be created or reused.
3874:    Use MAT_INPLACE_MATRIX for inplace conversion, otherwise use
3875:    MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX.

3877:    Output Parameter:
3878: .  M - pointer to place new matrix

3880:    Notes:
3881:    MatConvert() first creates a new matrix and then copies the data from
3882:    the first matrix.  A related routine is MatCopy(), which copies the matrix
3883:    entries of one matrix to another already existing matrix context.

3885:    Cannot be used to convert a sequential matrix to parallel or parallel to sequential;
3886:    the MPI communicator of the generated matrix is always the same as the communicator
3887:    of the input matrix.

3889:    Level: intermediate

3891:    Concepts: matrices^converting between storage formats

3893: .seealso: MatCopy(), MatDuplicate()
3894: @*/
3895: PetscErrorCode MatConvert(Mat mat, MatType newtype,MatReuse reuse,Mat *M)
3896: {
3898:   PetscBool      sametype,issame,flg;
3899:   char           convname[256],mtype[256];
3900:   Mat            B;

3906:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
3907:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
3908:   MatCheckPreallocated(mat,1);
3909:   MatSetOption(mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);

3911:   PetscOptionsGetString(((PetscObject)mat)->options,((PetscObject)mat)->prefix,"-matconvert_type",mtype,256,&flg);
3912:   if (flg) {
3913:     newtype = mtype;
3914:   }
3915:   PetscObjectTypeCompare((PetscObject)mat,newtype,&sametype);
3916:   PetscStrcmp(newtype,"same",&issame);
3917:   if ((reuse == MAT_INPLACE_MATRIX) && (mat != *M)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"MAT_INPLACE_MATRIX requires same input and output matrix");

3919:   if ((reuse == MAT_INPLACE_MATRIX) && (issame || sametype)) return(0);

3921:   if ((sametype || issame) && (reuse==MAT_INITIAL_MATRIX) && mat->ops->duplicate) {
3922:     (*mat->ops->duplicate)(mat,MAT_COPY_VALUES,M);
3923:   } else {
3924:     PetscErrorCode (*conv)(Mat, MatType,MatReuse,Mat*)=NULL;
3925:     const char     *prefix[3] = {"seq","mpi",""};
3926:     PetscInt       i;
3927:     /*
3928:        Order of precedence:
3929:        1) See if a specialized converter is known to the current matrix.
3930:        2) See if a specialized converter is known to the desired matrix class.
3931:        3) See if a good general converter is registered for the desired class
3932:           (as of 6/27/03 only MATMPIADJ falls into this category).
3933:        4) See if a good general converter is known for the current matrix.
3934:        5) Use a really basic converter.
3935:     */

3937:     /* 1) See if a specialized converter is known to the current matrix and the desired class */
3938:     for (i=0; i<3; i++) {
3939:       PetscStrcpy(convname,"MatConvert_");
3940:       PetscStrcat(convname,((PetscObject)mat)->type_name);
3941:       PetscStrcat(convname,"_");
3942:       PetscStrcat(convname,prefix[i]);
3943:       PetscStrcat(convname,issame ? ((PetscObject)mat)->type_name : newtype);
3944:       PetscStrcat(convname,"_C");
3945:       PetscObjectQueryFunction((PetscObject)mat,convname,&conv);
3946:       if (conv) goto foundconv;
3947:     }

3949:     /* 2)  See if a specialized converter is known to the desired matrix class. */
3950:     MatCreate(PetscObjectComm((PetscObject)mat),&B);
3951:     MatSetSizes(B,mat->rmap->n,mat->cmap->n,mat->rmap->N,mat->cmap->N);
3952:     MatSetType(B,newtype);
3953:     for (i=0; i<3; i++) {
3954:       PetscStrcpy(convname,"MatConvert_");
3955:       PetscStrcat(convname,((PetscObject)mat)->type_name);
3956:       PetscStrcat(convname,"_");
3957:       PetscStrcat(convname,prefix[i]);
3958:       PetscStrcat(convname,newtype);
3959:       PetscStrcat(convname,"_C");
3960:       PetscObjectQueryFunction((PetscObject)B,convname,&conv);
3961:       if (conv) {
3962:         MatDestroy(&B);
3963:         goto foundconv;
3964:       }
3965:     }

3967:     /* 3) See if a good general converter is registered for the desired class */
3968:     conv = B->ops->convertfrom;
3969:     MatDestroy(&B);
3970:     if (conv) goto foundconv;

3972:     /* 4) See if a good general converter is known for the current matrix */
3973:     if (mat->ops->convert) {
3974:       conv = mat->ops->convert;
3975:     }
3976:     if (conv) goto foundconv;

3978:     /* 5) Use a really basic converter. */
3979:     conv = MatConvert_Basic;

3981: foundconv:
3982:     PetscLogEventBegin(MAT_Convert,mat,0,0,0);
3983:     (*conv)(mat,newtype,reuse,M);
3984:     PetscLogEventEnd(MAT_Convert,mat,0,0,0);
3985:   }
3986:   PetscObjectStateIncrease((PetscObject)*M);

3988:   /* Copy Mat options */
3989:   if (mat->symmetric) {MatSetOption(*M,MAT_SYMMETRIC,PETSC_TRUE);}
3990:   if (mat->hermitian) {MatSetOption(*M,MAT_HERMITIAN,PETSC_TRUE);}
3991:   return(0);
3992: }
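/*
   Illustrative sketch (not part of PETSc itself): converting an assembled matrix to a
   dense representation; MAT_INITIAL_MATRIX creates the destination matrix.

     Mat Adense;

     MatConvert(A,MATDENSE,MAT_INITIAL_MATRIX,&Adense);
*/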

3996: /*@C
3997:    MatFactorGetSolverPackage - Returns name of the package providing the factorization routines

3999:    Not Collective

4001:    Input Parameter:
4002: .  mat - the matrix, must be a factored matrix

4004:    Output Parameter:
4005: .   type - the string name of the package (do not free this string)

4007:    Notes:
4008:       In Fortran you pass in an empty string and the package name will be copied into it.
4009:     (Make sure the string is long enough)

4011:    Level: intermediate

4013: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable(), MatGetFactor()
4014: @*/
4015: PetscErrorCode MatFactorGetSolverPackage(Mat mat, const MatSolverPackage *type)
4016: {
4017:   PetscErrorCode ierr, (*conv)(Mat,const MatSolverPackage*);

4022:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
4023:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorGetSolverPackage_C",&conv);
4024:   if (!conv) {
4025:     *type = MATSOLVERPETSC;
4026:   } else {
4027:     (*conv)(mat,type);
4028:   }
4029:   return(0);
4030: }

4032: typedef struct _MatSolverPackageForSpecifcType* MatSolverPackageForSpecifcType;
4033: struct _MatSolverPackageForSpecifcType {
4034:   MatType                        mtype;
4035:   PetscErrorCode                 (*getfactor[4])(Mat,MatFactorType,Mat*);
4036:   MatSolverPackageForSpecifcType next;
4037: };

4039: typedef struct _MatSolverPackageHolder* MatSolverPackageHolder;
4040: struct _MatSolverPackageHolder {
4041:   char                           *name;
4042:   MatSolverPackageForSpecifcType handlers;
4043:   MatSolverPackageHolder         next;
4044: };

4046: static MatSolverPackageHolder MatSolverPackageHolders = NULL;

4050: /*@C
4051:    MatSolverPackageRegister - Registers a MatSolverPackage that works for a particular matrix type

4053:    Input Parameters:
4054: +    package - name of the package, for example petsc or superlu
4055: .    mtype - the matrix type that works with this package
4056: .    ftype - the type of factorization supported by the package
4057: -    getfactor - routine that will create the factored matrix ready to be used

4059:     Level: intermediate

4061: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4062: @*/
4063: PetscErrorCode MatSolverPackageRegister(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscErrorCode (*getfactor)(Mat,MatFactorType,Mat*))
4064: {
4065:   PetscErrorCode                 ierr;
4066:   MatSolverPackageHolder         next = MatSolverPackageHolders,prev;
4067:   PetscBool                      flg;
4068:   MatSolverPackageForSpecifcType inext,iprev = NULL;

4071:   if (!next) {
4072:     PetscNew(&MatSolverPackageHolders);
4073:     PetscStrallocpy(package,&MatSolverPackageHolders->name);
4074:     PetscNew(&MatSolverPackageHolders->handlers);
4075:     PetscStrallocpy(mtype,(char **)&MatSolverPackageHolders->handlers->mtype);
4076:     MatSolverPackageHolders->handlers->getfactor[(int)ftype-1] = getfactor;
4077:     return(0);
4078:   }
4079:   while (next) {
4080:     PetscStrcasecmp(package,next->name,&flg);
4081:     if (flg) {
4082:       inext = next->handlers;
4083:       while (inext) {
4084:         PetscStrcasecmp(mtype,inext->mtype,&flg);
4085:         if (flg) {
4086:           inext->getfactor[(int)ftype-1] = getfactor;
4087:           return(0);
4088:         }
4089:         iprev = inext;
4090:         inext = inext->next;
4091:       }
4092:       PetscNew(&iprev->next);
4093:       PetscStrallocpy(mtype,(char **)&iprev->next->mtype);
4094:       iprev->next->getfactor[(int)ftype-1] = getfactor;
4095:       return(0);
4096:     }
4097:     prev = next;
4098:     next = next->next;
4099:   }
4100:   PetscNew(&prev->next);
4101:   PetscStrallocpy(package,&prev->next->name);
4102:   PetscNew(&prev->next->handlers);
4103:   PetscStrallocpy(mtype,(char **)&prev->next->handlers->mtype);
4104:   prev->next->handlers->getfactor[(int)ftype-1] = getfactor;
4105:   return(0);
4106: }
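/*
   Illustrative sketch (not part of PETSc itself): how an external package might register
   an LU factorization for SeqAIJ matrices. MatGetFactor_SeqAIJ_MyPkg is a hypothetical
   routine with the getfactor signature used above.

     extern PetscErrorCode MatGetFactor_SeqAIJ_MyPkg(Mat,MatFactorType,Mat*);

     MatSolverPackageRegister("mypkg",MATSEQAIJ,MAT_FACTOR_LU,MatGetFactor_SeqAIJ_MyPkg);
*/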

4110: /*@C
4111:    MatSolverPackageGet - Gets the function that creates the factor matrix, if it exists

4113:    Input Parameters:
4114: +    package - name of the package, for example petsc or superlu
4115: .    mtype - the matrix type that works with this package
4116: -    ftype - the type of factorization supported by the package

4118:    Output Parameters:
4119: +   foundpackage - PETSC_TRUE if the package was registered
4120: .   foundmtype - PETSC_TRUE if the package supports the requested mtype
4121: -   getfactor - routine that will create the factored matrix ready to be used or NULL if not found

4123:     Level: intermediate

4125: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4126: @*/
4127: PetscErrorCode MatSolverPackageGet(const MatSolverPackage package,const MatType mtype,MatFactorType ftype,PetscBool *foundpackage,PetscBool *foundmtype,PetscErrorCode (**getfactor)(Mat,MatFactorType,Mat*))
4128: {
4129:   PetscErrorCode                 ierr;
4130:   MatSolverPackageHolder         next = MatSolverPackageHolders;
4131:   PetscBool                      flg;
4132:   MatSolverPackageForSpecifcType inext;

4135:   if (foundpackage) *foundpackage = PETSC_FALSE;
4136:   if (foundmtype)   *foundmtype   = PETSC_FALSE;
4137:   if (getfactor)    *getfactor    = NULL;

4139:   if (package) {
4140:     while (next) {
4141:       PetscStrcasecmp(package,next->name,&flg);
4142:       if (flg) {
4143:         if (foundpackage) *foundpackage = PETSC_TRUE;
4144:         inext = next->handlers;
4145:         while (inext) {
4146:           PetscStrcasecmp(mtype,inext->mtype,&flg);
4147:           if (flg) {
4148:             if (foundmtype) *foundmtype = PETSC_TRUE;
4149:             if (getfactor)  *getfactor  = inext->getfactor[(int)ftype-1];
4150:             return(0);
4151:           }
4152:           inext = inext->next;
4153:         }
4154:       }
4155:       next = next->next;
4156:     }
4157:   } else {
4158:     while (next) {
4159:       inext = next->handlers;
4160:       while (inext) {
4161:         PetscStrcasecmp(mtype,inext->mtype,&flg);
4162:         if (flg && inext->getfactor[(int)ftype-1]) {
4163:           if (foundpackage) *foundpackage = PETSC_TRUE;
4164:           if (foundmtype)   *foundmtype   = PETSC_TRUE;
4165:           if (getfactor)    *getfactor    = inext->getfactor[(int)ftype-1];
4166:           return(0);
4167:         }
4168:         inext = inext->next;
4169:       }
4170:       next = next->next;
4171:     }
4172:   }
4173:   return(0);
4174: }

4178: PetscErrorCode MatSolverPackageDestroy(void)
4179: {
4180:   PetscErrorCode                 ierr;
4181:   MatSolverPackageHolder         next = MatSolverPackageHolders,prev;
4182:   MatSolverPackageForSpecifcType inext,iprev;

4185:   while (next) {
4186:     PetscFree(next->name);
4187:     inext = next->handlers;
4188:     while (inext) {
4189:       PetscFree(inext->mtype);
4190:       iprev = inext;
4191:       inext = inext->next;
4192:       PetscFree(iprev);
4193:     }
4194:     prev = next;
4195:     next = next->next;
4196:     PetscFree(prev);
4197:   }
4198:   MatSolverPackageHolders = NULL;
4199:   return(0);
4200: }

4204: /*@C
4205:    MatGetFactor - Returns a matrix suitable for calls to MatXXFactorSymbolic()

4207:    Collective on Mat

4209:    Input Parameters:
4210: +  mat - the matrix
4211: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4212: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4214:    Output Parameters:
4215: .  f - the factor matrix used with MatXXFactorSymbolic() calls

4217:    Notes:
4218:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4219:      such as pastix, superlu, mumps etc.

4221:       PETSc must have been ./configure'd to use the external solver, using the option --download-<package>

4223:    Level: intermediate

4225: .seealso: MatCopy(), MatDuplicate(), MatGetFactorAvailable()
4226: @*/
4227: PetscErrorCode MatGetFactor(Mat mat, const MatSolverPackage type,MatFactorType ftype,Mat *f)
4228: {
4229:   PetscErrorCode ierr,(*conv)(Mat,MatFactorType,Mat*);
4230:   PetscBool      foundpackage,foundmtype;


4236:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4237:   MatCheckPreallocated(mat,1);

4239:   MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,&foundpackage,&foundmtype,&conv);
4240:   if (!foundpackage) {
4241:     if (type) {
4242:       SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate solver package %s. Perhaps you must ./configure with --download-%s",type,type);
4243:     } else {
4244:       SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"Could not locate a solver package. Perhaps you must ./configure with --download-<package>");
4245:     }
4246:   }
4247: 
4248:   if (!foundmtype) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support matrix type %s",type,((PetscObject)mat)->type_name);
4249:   if (!conv) SETERRQ3(PetscObjectComm((PetscObject)mat),PETSC_ERR_MISSING_FACTOR,"MatSolverPackage %s does not support factorization type %s for  matrix type %s",type,MatFactorTypes[ftype],((PetscObject)mat)->type_name);

4251:   (*conv)(mat,ftype,f);
4252:   return(0);
4253: }

4257: /*@C
4258:    MatGetFactorAvailable - Returns a flag indicating whether the matrix supports a particular solver package and factor type

4260:    Not Collective

4262:    Input Parameters:
4263: +  mat - the matrix
4264: .  type - name of solver type, for example, superlu, petsc (to use PETSc's default)
4265: -  ftype - factor type, MAT_FACTOR_LU, MAT_FACTOR_CHOLESKY, MAT_FACTOR_ICC, MAT_FACTOR_ILU,

4267:    Output Parameter:
4268: .    flg - PETSC_TRUE if the factorization is available

4270:    Notes:
4271:       Some PETSc matrix formats have alternative solvers available that are contained in alternative packages
4272:      such as pastix, superlu, mumps etc.

4274:       PETSc must have been ./configure'd to use the external solver, using the option --download-<package>

4276:    Level: intermediate

4278: .seealso: MatCopy(), MatDuplicate(), MatGetFactor()
4279: @*/
4280: PetscErrorCode MatGetFactorAvailable(Mat mat, const MatSolverPackage type,MatFactorType ftype,PetscBool  *flg)
4281: {
4282:   PetscErrorCode ierr, (*gconv)(Mat,MatFactorType,Mat*);


4288:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4289:   MatCheckPreallocated(mat,1);

4291:   *flg = PETSC_FALSE;
4292:   MatSolverPackageGet(type,((PetscObject)mat)->type_name,ftype,NULL,NULL,&gconv);
4293:   if (gconv) {
4294:     *flg = PETSC_TRUE;
4295:   }
4296:   return(0);
4297: }
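/*
   Illustrative sketch (not part of PETSc itself): checking whether an external package is
   available before requesting a factor, and falling back to PETSc's built-in factorization
   otherwise (SuperLU is only available if PETSc was configured with it).

     PetscBool flg;
     Mat       F;

     MatGetFactorAvailable(A,MATSOLVERSUPERLU,MAT_FACTOR_LU,&flg);
     MatGetFactor(A,flg ? MATSOLVERSUPERLU : MATSOLVERPETSC,MAT_FACTOR_LU,&F);
*/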

4299: #include <petscdmtypes.h>

4303: /*@
4304:    MatDuplicate - Duplicates a matrix including the non-zero structure.

4306:    Collective on Mat

4308:    Input Parameters:
4309: +  mat - the matrix
4310: -  op - either MAT_DO_NOT_COPY_VALUES, MAT_COPY_VALUES (which copies the numerical values of the matrix), or
4311:         MAT_SHARE_NONZERO_PATTERN (which shares the nonzero pattern with the original matrix and does not copy it).

4313:    Output Parameter:
4314: .  M - pointer to place new matrix

4316:    Level: intermediate

4318:    Concepts: matrices^duplicating

4320:     Notes: You cannot change the nonzero pattern for the parent or child matrix if you use MAT_SHARE_NONZERO_PATTERN.

4322: .seealso: MatCopy(), MatConvert()
4323: @*/
4324: PetscErrorCode MatDuplicate(Mat mat,MatDuplicateOption op,Mat *M)
4325: {
4327:   Mat            B;
4328:   PetscInt       i;
4329:   DM             dm;

4335:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4336:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4337:   MatCheckPreallocated(mat,1);

4339:   *M = 0;
4340:   if (!mat->ops->duplicate) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not written for this matrix type");
4341:   PetscLogEventBegin(MAT_Convert,mat,0,0,0);
4342:   (*mat->ops->duplicate)(mat,op,M);
4343:   B    = *M;

4345:   B->stencil.dim = mat->stencil.dim;
4346:   B->stencil.noc = mat->stencil.noc;
4347:   for (i=0; i<=mat->stencil.dim; i++) {
4348:     B->stencil.dims[i]   = mat->stencil.dims[i];
4349:     B->stencil.starts[i] = mat->stencil.starts[i];
4350:   }

4352:   B->nooffproczerorows = mat->nooffproczerorows;
4353:   B->nooffprocentries  = mat->nooffprocentries;

4355:   PetscObjectQuery((PetscObject) mat, "__PETSc_dm", (PetscObject*) &dm);
4356:   if (dm) {
4357:     PetscObjectCompose((PetscObject) B, "__PETSc_dm", (PetscObject) dm);
4358:   }
4359:   PetscLogEventEnd(MAT_Convert,mat,0,0,0);
4360:   PetscObjectStateIncrease((PetscObject)B);
4361:   return(0);
4362: }

4366: /*@
4367:    MatGetDiagonal - Gets the diagonal of a matrix.

4369:    Logically Collective on Mat and Vec

4371:    Input Parameters:
4372: +  mat - the matrix
4373: -  v - the vector for storing the diagonal

4375:    Output Parameter:
4376: .  v - the diagonal of the matrix

4378:    Level: intermediate

4380:    Note:
4381:    Currently only correct in parallel for square matrices.

4383:    Concepts: matrices^accessing diagonals

4385: .seealso: MatGetRow(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs()
4386: @*/
4387: PetscErrorCode MatGetDiagonal(Mat mat,Vec v)
4388: {

4395:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4396:   if (!mat->ops->getdiagonal) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4397:   MatCheckPreallocated(mat,1);

4399:   (*mat->ops->getdiagonal)(mat,v);
4400:   PetscObjectStateIncrease((PetscObject)v);
4401:   return(0);
4402: }
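/*
   Illustrative sketch (not part of PETSc itself): extracting the diagonal into a vector
   with a layout compatible with the matrix rows.

     Vec diag;

     MatCreateVecs(mat,NULL,&diag);
     MatGetDiagonal(mat,diag);
*/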

4406: /*@C
4407:    MatGetRowMin - Gets the minimum value (of the real part) of each
4408:         row of the matrix

4410:    Logically Collective on Mat and Vec

4412:    Input Parameters:
4413: .  mat - the matrix

4415:    Output Parameter:
4416: +  v - the vector for storing the minimums
4417: -  idx - the indices of the column found for each row (optional)

4419:    Level: intermediate

4421:    Notes: The result of this call is the same as if one converted the matrix to dense format
4422:       and found the minimum value in each row (i.e. the implicit zeros are counted as zeros).

4424:     This code is only implemented for a couple of matrix formats.

4426:    Concepts: matrices^getting row minimums

4428: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(),
4429:           MatGetRowMax()
4430: @*/
4431: PetscErrorCode MatGetRowMin(Mat mat,Vec v,PetscInt idx[])
4432: {

4439:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4440:   if (!mat->ops->getrowmax) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4441:   MatCheckPreallocated(mat,1);

4443:   (*mat->ops->getrowmin)(mat,v,idx);
4444:   PetscObjectStateIncrease((PetscObject)v);
4445:   return(0);
4446: }

4450: /*@C
4451:    MatGetRowMinAbs - Gets the minimum value (in absolute value) of each
4452:         row of the matrix

4454:    Logically Collective on Mat and Vec

4456:    Input Parameters:
4457: .  mat - the matrix

4459:    Output Parameter:
4460: +  v - the vector for storing the minimums
4461: -  idx - the indices of the column found for each row (or NULL if not needed)

4463:    Level: intermediate

4465:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4466:     row is 0 (the first column).

4468:     This code is only implemented for a couple of matrix formats.

4470:    Concepts: matrices^getting row minimums

4472: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMaxAbs(), MatGetRowMin()
4473: @*/
4474: PetscErrorCode MatGetRowMinAbs(Mat mat,Vec v,PetscInt idx[])
4475: {

4482:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4483:   if (!mat->ops->getrowminabs) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4484:   MatCheckPreallocated(mat,1);
4485:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4487:   (*mat->ops->getrowminabs)(mat,v,idx);
4488:   PetscObjectStateIncrease((PetscObject)v);
4489:   return(0);
4490: }

4494: /*@C
4495:    MatGetRowMax - Gets the maximum value (of the real part) of each
4496:         row of the matrix

4498:    Logically Collective on Mat and Vec

4500:    Input Parameters:
4501: .  mat - the matrix

4503:    Output Parameter:
4504: +  v - the vector for storing the maximums
4505: -  idx - the indices of the column found for each row (optional)

4507:    Level: intermediate

4509:    Notes: The result of this call is the same as if one converted the matrix to dense format
4510:       and found the maximum value in each row (i.e. the implicit zeros are counted as zeros).

4512:     This code is only implemented for a couple of matrix formats.

4514:    Concepts: matrices^getting row maximums

4516: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMaxAbs(), MatGetRowMin()
4517: @*/
4518: PetscErrorCode MatGetRowMax(Mat mat,Vec v,PetscInt idx[])
4519: {

4526:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4527:   if (!mat->ops->getrowmax) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4528:   MatCheckPreallocated(mat,1);

4530:   (*mat->ops->getrowmax)(mat,v,idx);
4531:   PetscObjectStateIncrease((PetscObject)v);
4532:   return(0);
4533: }

4537: /*@C
4538:    MatGetRowMaxAbs - Gets the maximum value (in absolute value) of each
4539:         row of the matrix

4541:    Logically Collective on Mat and Vec

4543:    Input Parameters:
4544: .  mat - the matrix

4546:    Output Parameter:
4547: +  v - the vector for storing the maximums
4548: -  idx - the indices of the column found for each row (or NULL if not needed)

4550:    Level: intermediate

4552:    Notes: if a row is completely empty or has only 0.0 values then the idx[] value for that
4553:     row is 0 (the first column).

4555:     This code is only implemented for a couple of matrix formats.

4557:    Concepts: matrices^getting row maximums

4559: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4560: @*/
4561: PetscErrorCode MatGetRowMaxAbs(Mat mat,Vec v,PetscInt idx[])
4562: {

4569:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4570:   if (!mat->ops->getrowmaxabs) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4571:   MatCheckPreallocated(mat,1);
4572:   if (idx) {PetscMemzero(idx,mat->rmap->n*sizeof(PetscInt));}

4574:   (*mat->ops->getrowmaxabs)(mat,v,idx);
4575:   PetscObjectStateIncrease((PetscObject)v);
4576:   return(0);
4577: }

4581: /*@
4582:    MatGetRowSum - Gets the sum of each row of the matrix

4584:    Logically Collective on Mat and Vec

4586:    Input Parameters:
4587: .  mat - the matrix

4589:    Output Parameter:
4590: .  v - the vector for storing the sum of rows

4592:    Level: intermediate

4594:    Notes: This code is slow since it is not currently specialized for different formats

4596:    Concepts: matrices^getting row sums

4598: .seealso: MatGetDiagonal(), MatGetSubMatrices(), MatGetSubmatrix(), MatGetRowMax(), MatGetRowMin()
4599: @*/
4600: PetscErrorCode MatGetRowSum(Mat mat, Vec v)
4601: {
4602:   PetscInt       start = 0, end = 0, row;
4603:   PetscScalar    *array;

4610:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4611:   MatCheckPreallocated(mat,1);
4612:   MatGetOwnershipRange(mat, &start, &end);
4613:   VecGetArray(v, &array);
4614:   for (row = start; row < end; ++row) {
4615:     PetscInt          ncols, col;
4616:     const PetscInt    *cols;
4617:     const PetscScalar *vals;

4619:     array[row - start] = 0.0;

4621:     MatGetRow(mat, row, &ncols, &cols, &vals);
4622:     for (col = 0; col < ncols; col++) {
4623:       array[row - start] += vals[col];
4624:     }
4625:     MatRestoreRow(mat, row, &ncols, &cols, &vals);
4626:   }
4627:   VecRestoreArray(v, &array);
4628:   PetscObjectStateIncrease((PetscObject) v);
4629:   return(0);
4630: }

4634: /*@
4635:    MatTranspose - Computes an in-place or out-of-place transpose of a matrix.

4637:    Collective on Mat

4639:    Input Parameter:
4640: +  mat - the matrix to transpose
4641: -  reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4643:    Output Parameters:
4644: .  B - the transpose

4646:    Notes:
4647:      If you pass in &mat for B, the transpose will be done in place; for example, MatTranspose(mat,MAT_REUSE_MATRIX,&mat);

4649:      Consider using MatCreateTranspose() instead if you only need a matrix that behaves like the transpose, but don't need the storage to be changed.

4651:    Level: intermediate

4653:    Concepts: matrices^transposing

4655: .seealso: MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4656: @*/
4657: PetscErrorCode MatTranspose(Mat mat,MatReuse reuse,Mat *B)
4658: {

4664:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4665:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4666:   if (!mat->ops->transpose) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4667:   MatCheckPreallocated(mat,1);

4669:   PetscLogEventBegin(MAT_Transpose,mat,0,0,0);
4670:   (*mat->ops->transpose)(mat,reuse,B);
4671:   PetscLogEventEnd(MAT_Transpose,mat,0,0,0);
4672:   if (B) {PetscObjectStateIncrease((PetscObject)*B);}
4673:   return(0);
4674: }
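/*
   Illustrative sketch (not part of PETSc itself): the two reuse modes described in the
   notes above, shown as independent alternatives.

     Mat At;

     MatTranspose(A,MAT_INITIAL_MATRIX,&At);     out-of-place, creates At = A^T
     MatTranspose(A,MAT_REUSE_MATRIX,&A);        in-place transpose of A itself
*/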

4678: /*@
4679:    MatIsTranspose - Test whether a matrix is another one's transpose,
4680:         or its own, in which case it tests symmetry.

4682:    Collective on Mat

4684:    Input Parameters:
4685: +  A - the matrix to test
4686: .  B - the matrix to test against, this can equal the first parameter
4687: -  tol - tolerance, differences between entries smaller than this are counted as zero

4688:    Output Parameters:
4689: .  flg - the result

4691:    Notes:
4692:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4693:    has a running time of the order of the number of nonzeros; the parallel
4694:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4696:    Level: intermediate

4698:    Concepts: matrices^transposing, matrix^symmetry

4700: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian()
4701: @*/
4702: PetscErrorCode MatIsTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4703: {
4704:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4710:   PetscObjectQueryFunction((PetscObject)A,"MatIsTranspose_C",&f);
4711:   PetscObjectQueryFunction((PetscObject)B,"MatIsTranspose_C",&g);
4712:   *flg = PETSC_FALSE;
4713:   if (f && g) {
4714:     if (f == g) {
4715:       (*f)(A,B,tol,flg);
4716:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for symmetry test");
4717:   } else {
4718:     MatType mattype;
4719:     if (!f) {
4720:       MatGetType(A,&mattype);
4721:     } else {
4722:       MatGetType(B,&mattype);
4723:     }
4724:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for transpose",mattype);
4725:   }
4726:   return(0);
4727: }

4731: /*@
4732:    MatHermitianTranspose - Computes an in-place or out-of-place transpose of a matrix in complex conjugate.

4734:    Collective on Mat

4736:    Input Parameter:
4737: +  mat - the matrix to transpose and complex conjugate
4738: -  reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

4740:    Output Parameters:
4741: .  B - the Hermitian transpose

4743:    Notes:
4744:      If you pass in &mat for B, the Hermitian transpose will be done in place.

4746:    Level: intermediate

4748:    Concepts: matrices^transposing, complex conjugate

4750: .seealso: MatTranspose(), MatMultTranspose(), MatMultTransposeAdd(), MatIsTranspose(), MatReuse
4751: @*/
4752: PetscErrorCode MatHermitianTranspose(Mat mat,MatReuse reuse,Mat *B)
4753: {

4757:   MatTranspose(mat,reuse,B);
4758: #if defined(PETSC_USE_COMPLEX)
4759:   MatConjugate(*B);
4760: #endif
4761:   return(0);
4762: }

4766: /*@
4767:    MatIsHermitianTranspose - Test whether a matrix is another one's Hermitian transpose.

4769:    Collective on Mat

4771:    Input Parameters:
4772: +  A - the matrix to test
4773: .  B - the matrix to test against, this can equal the first parameter
4774: -  tol - tolerance, differences between entries smaller than this are counted as zero

4775:    Output Parameters:
4776: .  flg - the result

4778:    Notes:
4779:    Only available for SeqAIJ/MPIAIJ matrices. The sequential algorithm
4780:    has a running time of the order of the number of nonzeros; the parallel
4781:    test involves parallel copies of the block-offdiagonal parts of the matrix.

4783:    Level: intermediate

4785:    Concepts: matrices^transposing, matrix^symmetry

4787: .seealso: MatTranspose(), MatIsSymmetric(), MatIsHermitian(), MatIsTranspose()
4788: @*/
4789: PetscErrorCode MatIsHermitianTranspose(Mat A,Mat B,PetscReal tol,PetscBool  *flg)
4790: {
4791:   PetscErrorCode ierr,(*f)(Mat,Mat,PetscReal,PetscBool*),(*g)(Mat,Mat,PetscReal,PetscBool*);

4797:   PetscObjectQueryFunction((PetscObject)A,"MatIsHermitianTranspose_C",&f);
4798:   PetscObjectQueryFunction((PetscObject)B,"MatIsHermitianTranspose_C",&g);
4799:   if (f && g) {
4800:     if (f==g) {
4801:       (*f)(A,B,tol,flg);
4802:     } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_NOTSAMETYPE,"Matrices do not have the same comparator for Hermitian test");
4803:   }
4804:   return(0);
4805: }

4809: /*@
4810:    MatPermute - Creates a new matrix with rows and columns permuted from the
4811:    original.

4813:    Collective on Mat

4815:    Input Parameters:
4816: +  mat - the matrix to permute
4817: .  row - row permutation, each processor supplies only the permutation for its rows
4818: -  col - column permutation, each processor supplies only the permutation for its columns

4820:    Output Parameters:
4821: .  B - the permuted matrix

4823:    Level: advanced

4825:    Note:
4826:    The index sets map from row/col of permuted matrix to row/col of original matrix.
4827:    The index sets should be on the same communicator as Mat and have the same local sizes.

4829:    Concepts: matrices^permuting

4831: .seealso: MatGetOrdering(), ISAllGather()

4833: @*/
4834: PetscErrorCode MatPermute(Mat mat,IS row,IS col,Mat *B)
4835: {

4844:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4845:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4846:   if (!mat->ops->permute) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatPermute not available for Mat type %s",((PetscObject)mat)->type_name);
4847:   MatCheckPreallocated(mat,1);

4849:   (*mat->ops->permute)(mat,row,col,B);
4850:   PetscObjectStateIncrease((PetscObject)*B);
4851:   return(0);
4852: }
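/*
   Illustrative sketch (not part of PETSc itself): permuting a matrix with a reverse
   Cuthill-McKee ordering obtained from MatGetOrdering().

     IS  rperm,cperm;
     Mat Aperm;

     MatGetOrdering(A,MATORDERINGRCM,&rperm,&cperm);
     MatPermute(A,rperm,cperm,&Aperm);
*/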

4856: /*@
4857:    MatEqual - Compares two matrices.

4859:    Collective on Mat

4861:    Input Parameters:
4862: +  A - the first matrix
4863: -  B - the second matrix

4865:    Output Parameter:
4866: .  flg - PETSC_TRUE if the matrices are equal; PETSC_FALSE otherwise.

4868:    Level: intermediate

4870:    Concepts: matrices^equality between
4871: @*/
4872: PetscErrorCode MatEqual(Mat A,Mat B,PetscBool  *flg)
4873: {

4883:   MatCheckPreallocated(B,2);
4884:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4885:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4886:   if (A->rmap->N != B->rmap->N || A->cmap->N != B->cmap->N) SETERRQ4(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Mat A,Mat B: global dim %D %D %D %D",A->rmap->N,B->rmap->N,A->cmap->N,B->cmap->N);
4887:   if (!A->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)A)->type_name);
4888:   if (!B->ops->equal) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Mat type %s",((PetscObject)B)->type_name);
4889:   if (A->ops->equal != B->ops->equal) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"A is type: %s\nB is type: %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
4890:   MatCheckPreallocated(A,1);

4892:   (*A->ops->equal)(A,B,flg);
4893:   return(0);
4894: }

4898: /*@
4899:    MatDiagonalScale - Scales a matrix on the left and right by diagonal
4900:    matrices that are stored as vectors.  Either of the two scaling
4901:    matrices can be NULL.

4903:    Collective on Mat

4905:    Input Parameters:
4906: +  mat - the matrix to be scaled
4907: .  l - the left scaling vector (or NULL)
4908: -  r - the right scaling vector (or NULL)

4910:    Notes:
4911:    MatDiagonalScale() computes A = LAR, where
4912:    L is a diagonal matrix (stored as the vector l) and R is a diagonal matrix (stored as the vector r).
4913:    L scales the rows of the matrix and R scales the columns.
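   Example of Usage:
   A sketch, assuming mat is assembled; MatCreateVecs() is used here only to obtain vectors with compatible layouts:
.vb
     Vec l,r;
     MatCreateVecs(mat,&r,&l);    /* r matches the column layout, l the row layout */
     VecSet(l,2.0);               /* scale every row by 2 */
     VecSet(r,0.5);               /* scale every column by 1/2 */
     MatDiagonalScale(mat,l,r);   /* mat <- diag(l) * mat * diag(r) */
     VecDestroy(&l);
     VecDestroy(&r);
.ve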

4915:    Level: intermediate

4917:    Concepts: matrices^diagonal scaling
4918:    Concepts: diagonal scaling of matrices

4920: .seealso: MatScale()
4921: @*/
4922: PetscErrorCode MatDiagonalScale(Mat mat,Vec l,Vec r)
4923: {

4929:   if (!mat->ops->diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4932:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4933:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4934:   MatCheckPreallocated(mat,1);

4936:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4937:   (*mat->ops->diagonalscale)(mat,l,r);
4938:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4939:   PetscObjectStateIncrease((PetscObject)mat);
4940: #if defined(PETSC_HAVE_CUSP)
4941:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4942:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4943:   }
4944: #elif defined(PETSC_HAVE_VIENNACL)
4945:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
4946:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
4947:   }
4948: #elif defined(PETSC_HAVE_VECCUDA)
4949:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
4950:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
4951:   }
4952: #endif
4953:   return(0);
4954: }

4958: /*@
4959:     MatScale - Scales all elements of a matrix by a given number.

4961:     Logically Collective on Mat

4963:     Input Parameters:
4964: +   mat - the matrix to be scaled
4965: -   a  - the scaling value

4967:     Output Parameter:
4968: .   mat - the scaled matrix

4970:     Level: intermediate

4972:     Concepts: matrices^scaling all entries

4974: .seealso: MatDiagonalScale()
4975: @*/
4976: PetscErrorCode MatScale(Mat mat,PetscScalar a)
4977: {

4983:   if (a != (PetscScalar)1.0 && !mat->ops->scale) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
4984:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
4985:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
4987:   MatCheckPreallocated(mat,1);

4989:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
4990:   if (a != (PetscScalar)1.0) {
4991:     (*mat->ops->scale)(mat,a);
4992:     PetscObjectStateIncrease((PetscObject)mat);
4993:   }
4994:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
4995: #if defined(PETSC_HAVE_CUSP)
4996:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
4997:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
4998:   }
4999: #elif defined(PETSC_HAVE_VIENNACL)
5000:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5001:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5002:   }
5003: #elif defined(PETSC_HAVE_VECCUDA)
5004:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5005:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5006:   }
5007: #endif
5008:   return(0);
5009: }

5013: /*@
5014:    MatNorm - Calculates various norms of a matrix.

5016:    Collective on Mat

5018:    Input Parameters:
5019: +  mat - the matrix
5020: -  type - the type of norm, NORM_1, NORM_FROBENIUS, NORM_INFINITY

5022:    Output Parameters:
5023: .  nrm - the resulting norm
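   Example of Usage:
   A small sketch computing the three supported norms of an assembled matrix mat:
.vb
     PetscReal nrm1,nrmf,nrmi;
     MatNorm(mat,NORM_1,&nrm1);          /* maximum absolute column sum */
     MatNorm(mat,NORM_FROBENIUS,&nrmf);  /* square root of the sum of squares of the entries */
     MatNorm(mat,NORM_INFINITY,&nrmi);   /* maximum absolute row sum */
.ve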

5025:    Level: intermediate

5027:    Concepts: matrices^norm
5028:    Concepts: norm^of matrix
5029: @*/
5030: PetscErrorCode MatNorm(Mat mat,NormType type,PetscReal *nrm)
5031: {


5039:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5040:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5041:   if (!mat->ops->norm) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5042:   MatCheckPreallocated(mat,1);

5044:   (*mat->ops->norm)(mat,type,nrm);
5045:   return(0);
5046: }

5048: /*
5049:      This variable is used to prevent counting of MatAssemblyBegin() calls that
5050:    are made from within a MatAssemblyEnd().
5051: */
5052: static PetscInt MatAssemblyEnd_InUse = 0;
5055: /*@
5056:    MatAssemblyBegin - Begins assembling the matrix.  This routine should
5057:    be called after completing all calls to MatSetValues().

5059:    Collective on Mat

5061:    Input Parameters:
5062: +  mat - the matrix
5063: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5065:    Notes:
5066:    MatSetValues() generally caches the values.  The matrix is ready to
5067:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5068:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5069:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5070:    using the matrix.

5072:    ALL processes that share a matrix MUST call MatAssemblyBegin() and MatAssemblyEnd() the SAME NUMBER of times, and each time with the
5073:    same flag of MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY for all processes. Thus you CANNOT locally change from ADD_VALUES to INSERT_VALUES; that is
5074:    a global collective operation requiring all processes that share the matrix.

5076:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5077:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5078:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.
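   Example of Usage:
   A sketch of switching insert modes with a flush assembly (mat is assumed created and preallocated; the indices and value are illustrative):
.vb
     PetscInt    i = 0, j = 0;
     PetscScalar v = 1.0;
     MatSetValues(mat,1,&i,1,&j,&v,INSERT_VALUES);
     MatAssemblyBegin(mat,MAT_FLUSH_ASSEMBLY);   /* flush before changing the insert mode */
     MatAssemblyEnd(mat,MAT_FLUSH_ASSEMBLY);
     MatSetValues(mat,1,&i,1,&j,&v,ADD_VALUES);
     MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);   /* final assembly before using the matrix */
     MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);
.ve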

5080:    Level: beginner

5082:    Concepts: matrices^assembling

5084: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssembled()
5085: @*/
5086: PetscErrorCode MatAssemblyBegin(Mat mat,MatAssemblyType type)
5087: {

5093:   MatCheckPreallocated(mat,1);
5094:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix.\nDid you forget to call MatSetUnfactored()?");
5095:   if (mat->assembled) {
5096:     mat->was_assembled = PETSC_TRUE;
5097:     mat->assembled     = PETSC_FALSE;
5098:   }
5099:   if (!MatAssemblyEnd_InUse) {
5100:     PetscLogEventBegin(MAT_AssemblyBegin,mat,0,0,0);
5101:     if (mat->ops->assemblybegin) {(*mat->ops->assemblybegin)(mat,type);}
5102:     PetscLogEventEnd(MAT_AssemblyBegin,mat,0,0,0);
5103:   } else if (mat->ops->assemblybegin) {
5104:     (*mat->ops->assemblybegin)(mat,type);
5105:   }
5106:   return(0);
5107: }

5111: /*@
5112:    MatAssembled - Indicates if a matrix has been assembled and is ready for
5113:      use; for example, in matrix-vector product.

5115:    Not Collective

5117:    Input Parameter:
5118: .  mat - the matrix

5120:    Output Parameter:
5121: .  assembled - PETSC_TRUE or PETSC_FALSE

5123:    Level: advanced

5125:    Concepts: matrices^assembled?

5127: .seealso: MatAssemblyEnd(), MatSetValues(), MatAssemblyBegin()
5128: @*/
5129: PetscErrorCode MatAssembled(Mat mat,PetscBool  *assembled)
5130: {
5135:   *assembled = mat->assembled;
5136:   return(0);
5137: }

5141: /*@
5142:    MatAssemblyEnd - Completes assembling the matrix.  This routine should
5143:    be called after MatAssemblyBegin().

5145:    Collective on Mat

5147:    Input Parameters:
5148: +  mat - the matrix
5149: -  type - type of assembly, either MAT_FLUSH_ASSEMBLY or MAT_FINAL_ASSEMBLY

5151:    Options Database Keys:
5152: +  -mat_view ::ascii_info - Prints info on matrix at conclusion of MatAssemblyEnd()
5153: .  -mat_view ::ascii_info_detail - Prints more detailed info
5154: .  -mat_view - Prints matrix in ASCII format
5155: .  -mat_view ::ascii_matlab - Prints matrix in Matlab format
5156: .  -mat_view draw - PetscDraws nonzero structure of matrix, using MatView() and PetscDrawOpenX().
5157: .  -display <name> - Sets display name (default is host)
5158: .  -draw_pause <sec> - Sets number of seconds to pause after display
5159: .  -mat_view socket - Sends matrix to socket, can be accessed from Matlab (See Users-Manual: Chapter 11 Using MATLAB with PETSc )
5160: .  -viewer_socket_machine <machine> - Machine to use for socket
5161: .  -viewer_socket_port <port> - Port number to use for socket
5162: -  -mat_view binary:filename[:append] - Save matrix to file in binary format

5164:    Notes:
5165:    MatSetValues() generally caches the values.  The matrix is ready to
5166:    use only after MatAssemblyBegin() and MatAssemblyEnd() have been called.
5167:    Use MAT_FLUSH_ASSEMBLY when switching between ADD_VALUES and INSERT_VALUES
5168:    in MatSetValues(); use MAT_FINAL_ASSEMBLY for the final assembly before
5169:    using the matrix.

5171:    Space for preallocated nonzeros that is not filled by a call to MatSetValues() or a related routine is compressed
5172:    out by assembly. If you intend to use that extra space on a subsequent assembly, be sure to insert explicit zeros
5173:    before MAT_FINAL_ASSEMBLY so the space is not compressed out.

5175:    Level: beginner

5177: .seealso: MatAssemblyBegin(), MatSetValues(), PetscDrawOpenX(), PetscDrawCreate(), MatView(), MatAssembled(), PetscViewerSocketOpen()
5178: @*/
5179: PetscErrorCode MatAssemblyEnd(Mat mat,MatAssemblyType type)
5180: {
5181:   PetscErrorCode  ierr;
5182:   static PetscInt inassm = 0;
5183:   PetscBool       flg    = PETSC_FALSE;


5189:   inassm++;
5190:   MatAssemblyEnd_InUse++;
5191:   if (MatAssemblyEnd_InUse == 1) { /* Do the logging only the first time through */
5192:     PetscLogEventBegin(MAT_AssemblyEnd,mat,0,0,0);
5193:     if (mat->ops->assemblyend) {
5194:       (*mat->ops->assemblyend)(mat,type);
5195:     }
5196:     PetscLogEventEnd(MAT_AssemblyEnd,mat,0,0,0);
5197:   } else if (mat->ops->assemblyend) {
5198:     (*mat->ops->assemblyend)(mat,type);
5199:   }

5201:   /* Flush assembly is not a true assembly */
5202:   if (type != MAT_FLUSH_ASSEMBLY) {
5203:     mat->assembled = PETSC_TRUE; mat->num_ass++;
5204:   }
5205:   mat->insertmode = NOT_SET_VALUES;
5206:   MatAssemblyEnd_InUse--;
5207:   PetscObjectStateIncrease((PetscObject)mat);
5208:   if (!mat->symmetric_eternal) {
5209:     mat->symmetric_set              = PETSC_FALSE;
5210:     mat->hermitian_set              = PETSC_FALSE;
5211:     mat->structurally_symmetric_set = PETSC_FALSE;
5212:   }
5213: #if defined(PETSC_HAVE_CUSP)
5214:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5215:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5216:   }
5217: #elif defined(PETSC_HAVE_VIENNACL)
5218:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5219:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5220:   }
5221: #elif defined(PETSC_HAVE_VECCUDA)
5222:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5223:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5224:   }
5225: #endif
5226:   if (inassm == 1 && type != MAT_FLUSH_ASSEMBLY) {
5227:     MatViewFromOptions(mat,NULL,"-mat_view");

5229:     if (mat->checksymmetryonassembly) {
5230:       MatIsSymmetric(mat,mat->checksymmetrytol,&flg);
5231:       if (flg) {
5232:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5233:       } else {
5234:         PetscPrintf(PetscObjectComm((PetscObject)mat),"Matrix is not symmetric (tolerance %g)\n",(double)mat->checksymmetrytol);
5235:       }
5236:     }
5237:     if (mat->nullsp && mat->checknullspaceonassembly) {
5238:       MatNullSpaceTest(mat->nullsp,mat,NULL);
5239:     }
5240:   }
5241:   inassm--;
5242:   return(0);
5243: }

5247: /*@
5248:    MatSetOption - Sets a parameter option for a matrix. Some options
5249:    may be specific to certain storage formats.  Some options
5250:    determine how values will be inserted (or added). Sorted,
5251:    row-oriented input will generally assemble the fastest. The default
5252:    is row-oriented.

5254:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5256:    Input Parameters:
5257: +  mat - the matrix
5258: .  option - the option, one of those listed below (and possibly others),
5259: -  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5261:   Options Describing Matrix Structure:
5262: +    MAT_SPD - symmetric positive definite
5263: .    MAT_SYMMETRIC - symmetric in terms of both structure and value
5264: .    MAT_HERMITIAN - the transpose is the complex conjugate (Hermitian matrix)
5265: .    MAT_STRUCTURALLY_SYMMETRIC - symmetric nonzero structure
5266: -    MAT_SYMMETRY_ETERNAL - if you would like the symmetry/Hermitian flag
5267:                             you set to be kept with all future use of the matrix
5268:                             including after MatAssemblyBegin/End() which could
5269:                             potentially change the symmetry structure, i.e. you
5270:                             KNOW the matrix will ALWAYS have the property you set.


5273:    Options For Use with MatSetValues():
5274:    Insert a logically dense subblock, which can be
5275: .    MAT_ROW_ORIENTED - row-oriented (default)

5277:    Note these options reflect the data you pass in with MatSetValues(); they have
5278:    nothing to do with how the data is stored internally in the matrix
5279:    data structure.

5281:    When (re)assembling a matrix, we can restrict the input for
5282:    efficiency/debugging purposes.  These options include:
5283: +    MAT_NEW_NONZERO_LOCATIONS - additional insertions will be allowed if they generate a new nonzero (slow)
5284: .    MAT_NEW_DIAGONALS - new diagonals will be allowed (for block diagonal format only)
5285: .    MAT_IGNORE_OFF_PROC_ENTRIES - drops off-processor entries
5286: .    MAT_NEW_NONZERO_LOCATION_ERR - generates an error for new matrix entry
5287: .    MAT_USE_HASH_TABLE - uses a hash table to speed up matrix assembly
5288: .    MAT_NO_OFF_PROC_ENTRIES - you know each process will only set values for its own rows, will generate an error if
5289:         any process sets values for another process. This avoids all reductions in the MatAssembly routines and thus improves
5290:         performance for very large process counts.
5291: -    MAT_SUBSET_OFF_PROC_ENTRIES - you know that the first assembly after setting this flag will set a superset
5292:         of the off-process entries required for all subsequent assemblies. This avoids a rendezvous step in the MatAssembly
5293:         functions, instead sending only neighbor messages.

5295:    Notes:
5296:    Except for MAT_UNUSED_NONZERO_LOCATION_ERR and MAT_ROW_ORIENTED, all processes that share the matrix must pass the same value in flg!

5298:    Some options are relevant only for particular matrix types and
5299:    are thus ignored by others.  Other options are not supported by
5300:    certain matrix types and will generate an error message if set.

5302:    If using a Fortran 77 module to compute a matrix, one may need to
5303:    use the column-oriented option (or convert to the row-oriented
5304:    format).

5306:    MAT_NEW_NONZERO_LOCATIONS set to PETSC_FALSE indicates that any add or insertion
5307:    that would generate a new entry in the nonzero structure is instead
5308:    ignored.  Thus, if memory has not already been allocated for this particular
5309:    data, then the insertion is ignored. For dense matrices, in which
5310:    the entire array is allocated, no entries are ever ignored.
5311:    Set after the first MatAssemblyEnd(). If this option is set, then the MatAssemblyBegin/End() process requires one less global reduction

5313:    MAT_NEW_NONZERO_LOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5314:    that would generate a new entry in the nonzero structure instead produces
5315:    an error. (Currently supported for AIJ and BAIJ formats only.) If this option is set, then the MatAssemblyBegin/End() process requires one less global reduction

5317:    MAT_NEW_NONZERO_ALLOCATION_ERR set to PETSC_TRUE indicates that any add or insertion
5318:    that would generate a new entry that has not been preallocated will
5319:    instead produce an error. (Currently supported for AIJ and BAIJ formats
5320:    only.) This is a useful flag when debugging matrix memory preallocation.
5321:    If this option is set, then the MatAssemblyBegin/End() process requires one less global reduction

5323:    MAT_IGNORE_OFF_PROC_ENTRIES set to PETSC_TRUE indicates entries destined for
5324:    other processors should be dropped, rather than stashed.
5325:    This is useful if you know that the "owning" processor is also
5326:    always generating the correct matrix entries, so that PETSc need
5327:    not transfer duplicate entries generated on another processor.

5329:    MAT_USE_HASH_TABLE indicates that a hash table be used to improve the
5330:    searches during matrix assembly. When this flag is set, the hash table
5331:    is created during the first Matrix Assembly. This hash table is
5332:    used the next time through, during MatSetValues()/MatSetValuesBlocked()
5333:    to improve the searching of indices. MAT_NEW_NONZERO_LOCATIONS flag
5334:    should be used with MAT_USE_HASH_TABLE flag. This option is currently
5335:    supported by MATMPIBAIJ format only.

5337:    MAT_KEEP_NONZERO_PATTERN indicates when MatZeroRows() is called the zeroed entries
5338:    are kept in the nonzero structure

5340:    MAT_IGNORE_ZERO_ENTRIES - for AIJ/IS matrices this will stop zero values from creating
5341:    a zero location in the matrix

5343:    MAT_USE_INODES - indicates using inode version of the code - works with AIJ matrix types

5345:    MAT_NO_OFF_PROC_ZERO_ROWS - you know each process will only zero its own rows. This avoids all reductions in the
5346:         zero row routines and thus improves performance for very large process counts.

5348:    MAT_IGNORE_LOWER_TRIANGULAR - For SBAIJ matrices will ignore any insertions you make in the lower triangular
5349:         part of the matrix (since they should match the upper triangular part).

5351:    Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
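   Example of Usage:
   A sketch of some common settings (mat is assumed to have its type and sizes set):
.vb
     MatSetOption(mat,MAT_SYMMETRIC,PETSC_TRUE);                  /* assert the matrix is symmetric */
     MatSetOption(mat,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);           /* keep that flag across assemblies */
     MatSetOption(mat,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE); /* error on entries that were not preallocated */
.ve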

5353:    Level: intermediate

5355:    Concepts: matrices^setting options

5357: .seealso:  MatOption, Mat

5359: @*/
5360: PetscErrorCode MatSetOption(Mat mat,MatOption op,PetscBool flg)
5361: {

5367:   if (op > 0) {
5370:   }

5372:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Option %d is out of range",(int)op);
5373:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot set options until type and size have been set, see MatSetType() and MatSetSizes()");

5375:   switch (op) {
5376:   case MAT_NO_OFF_PROC_ENTRIES:
5377:     mat->nooffprocentries = flg;
5378:     return(0);
5379:     break;
5380:   case MAT_SUBSET_OFF_PROC_ENTRIES:
5381:     mat->subsetoffprocentries = flg;
5382:     return(0);
5383:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5384:     mat->nooffproczerorows = flg;
5385:     return(0);
5386:     break;
5387:   case MAT_SPD:
5388:     mat->spd_set = PETSC_TRUE;
5389:     mat->spd     = flg;
5390:     if (flg) {
5391:       mat->symmetric                  = PETSC_TRUE;
5392:       mat->structurally_symmetric     = PETSC_TRUE;
5393:       mat->symmetric_set              = PETSC_TRUE;
5394:       mat->structurally_symmetric_set = PETSC_TRUE;
5395:     }
5396:     break;
5397:   case MAT_SYMMETRIC:
5398:     mat->symmetric = flg;
5399:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5400:     mat->symmetric_set              = PETSC_TRUE;
5401:     mat->structurally_symmetric_set = flg;
5402:     break;
5403:   case MAT_HERMITIAN:
5404:     mat->hermitian = flg;
5405:     if (flg) mat->structurally_symmetric = PETSC_TRUE;
5406:     mat->hermitian_set              = PETSC_TRUE;
5407:     mat->structurally_symmetric_set = flg;
5408:     break;
5409:   case MAT_STRUCTURALLY_SYMMETRIC:
5410:     mat->structurally_symmetric     = flg;
5411:     mat->structurally_symmetric_set = PETSC_TRUE;
5412:     break;
5413:   case MAT_SYMMETRY_ETERNAL:
5414:     mat->symmetric_eternal = flg;
5415:     break;
5416:   default:
5417:     break;
5418:   }
5419:   if (mat->ops->setoption) {
5420:     (*mat->ops->setoption)(mat,op,flg);
5421:   }
5422:   return(0);
5423: }

5427: /*@
5428:    MatGetOption - Gets a parameter option that has been set for a matrix.

5430:    Logically Collective on Mat for certain operations, such as MAT_SPD, not collective for MAT_ROW_ORIENTED, see MatOption

5432:    Input Parameters:
5433: +  mat - the matrix
5434: -  option - the option, this only responds to certain options, check the code for which ones

5436:    Output Parameter:
5437: .  flg - turn the option on (PETSC_TRUE) or off (PETSC_FALSE)

5439:    Notes: Can only be called after MatSetSizes() and MatSetType() have been called.
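   Example of Usage:
   A small sketch querying one of the supported flags (mat is assumed to have its type and sizes set):
.vb
     PetscBool issym;
     MatGetOption(mat,MAT_SYMMETRIC,&issym);
.ve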

5441:    Level: intermediate

5443:    Concepts: matrices^setting options

5445: .seealso:  MatOption, MatSetOption()

5447: @*/
5448: PetscErrorCode MatGetOption(Mat mat,MatOption op,PetscBool *flg)
5449: {

5454:   if (((int) op) <= MAT_OPTION_MIN || ((int) op) >= MAT_OPTION_MAX) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Option %d is out of range",(int)op);
5455:   if (!((PetscObject)mat)->type_name) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_TYPENOTSET,"Cannot get options until type and size have been set, see MatSetType() and MatSetSizes()");

5457:   switch (op) {
5458:   case MAT_NO_OFF_PROC_ENTRIES:
5459:     *flg = mat->nooffprocentries;
5460:     break;
5461:   case MAT_NO_OFF_PROC_ZERO_ROWS:
5462:     *flg = mat->nooffproczerorows;
5463:     break;
5464:   case MAT_SYMMETRIC:
5465:     *flg = mat->symmetric;
5466:     break;
5467:   case MAT_HERMITIAN:
5468:     *flg = mat->hermitian;
5469:     break;
5470:   case MAT_STRUCTURALLY_SYMMETRIC:
5471:     *flg = mat->structurally_symmetric;
5472:     break;
5473:   case MAT_SYMMETRY_ETERNAL:
5474:     *flg = mat->symmetric_eternal;
5475:     break;
5476:   default:
5477:     break;
5478:   }
5479:   return(0);
5480: }

5484: /*@
5485:    MatZeroEntries - Zeros all entries of a matrix.  For sparse matrices
5486:    this routine retains the old nonzero structure.

5488:    Logically Collective on Mat

5490:    Input Parameters:
5491: .  mat - the matrix

5493:    Level: intermediate

5495:    Notes: If the matrix was not preallocated then a default, likely poor preallocation will be set in the matrix, so this should be called after the preallocation phase.
5496:    See the Performance chapter of the users manual for information on preallocating matrices.

5498:    Concepts: matrices^zeroing

5500: .seealso: MatZeroRows()
5501: @*/
5502: PetscErrorCode MatZeroEntries(Mat mat)
5503: {

5509:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5510:   if (mat->insertmode != NOT_SET_VALUES) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for matrices where you have set values but not yet assembled");
5511:   if (!mat->ops->zeroentries) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5512:   MatCheckPreallocated(mat,1);

5514:   PetscLogEventBegin(MAT_ZeroEntries,mat,0,0,0);
5515:   (*mat->ops->zeroentries)(mat);
5516:   PetscLogEventEnd(MAT_ZeroEntries,mat,0,0,0);
5517:   PetscObjectStateIncrease((PetscObject)mat);
5518: #if defined(PETSC_HAVE_CUSP)
5519:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5520:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5521:   }
5522: #elif defined(PETSC_HAVE_VIENNACL)
5523:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5524:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5525:   }
5526: #elif defined(PETSC_HAVE_VECCUDA)
5527:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5528:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5529:   }
5530: #endif
5531:   return(0);
5532: }

5536: /*@C
5537:    MatZeroRowsColumns - Zeros all entries (except possibly the main diagonal)
5538:    of a set of rows and columns of a matrix.

5540:    Collective on Mat

5542:    Input Parameters:
5543: +  mat - the matrix
5544: .  numRows - the number of rows to remove
5545: .  rows - the global row indices
5546: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5547: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5548: -  b - optional vector of right hand side, that will be adjusted by provided solution

5550:    Notes:
5551:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5553:    The user can set a value in the diagonal entry (or for the AIJ and
5554:    row formats can optionally remove the main diagonal entry from the
5555:    nonzero structure as well, by passing 0.0 as the final argument).

5557:    For the parallel case, all processes that share the matrix (i.e.,
5558:    those in the communicator used for matrix creation) MUST call this
5559:    routine, regardless of whether any rows being zeroed are owned by
5560:    them.

5562:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5563:    list only rows local to itself).

5565:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5567:    Level: intermediate

5569:    Concepts: matrices^zeroing rows

5571: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumnsIS()
5572: @*/
5573: PetscErrorCode MatZeroRowsColumns(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5574: {

5581:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5582:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5583:   if (!mat->ops->zerorowscolumns) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5584:   MatCheckPreallocated(mat,1);

5586:   (*mat->ops->zerorowscolumns)(mat,numRows,rows,diag,x,b);
5587:   MatViewFromOptions(mat,NULL,"-mat_view");
5588:   PetscObjectStateIncrease((PetscObject)mat);
5589: #if defined(PETSC_HAVE_CUSP)
5590:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5591:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5592:   }
5593: #elif defined(PETSC_HAVE_VIENNACL)
5594:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5595:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5596:   }
5597: #elif defined(PETSC_HAVE_VECCUDA)
5598:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5599:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5600:   }
5601: #endif
5602:   return(0);
5603: }

5607: /*@C
5608:    MatZeroRowsColumnsIS - Zeros all entries (except possibly the main diagonal)
5609:    of a set of rows and columns of a matrix.

5611:    Collective on Mat

5613:    Input Parameters:
5614: +  mat - the matrix
5615: .  is - the rows to zero
5616: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5617: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5618: -  b - optional vector of right hand side, that will be adjusted by provided solution

5620:    Notes:
5621:    This does not change the nonzero structure of the matrix, it merely zeros those entries in the matrix.

5623:    The user can set a value in the diagonal entry (or for the AIJ and
5624:    row formats can optionally remove the main diagonal entry from the
5625:    nonzero structure as well, by passing 0.0 as the final argument).

5627:    For the parallel case, all processes that share the matrix (i.e.,
5628:    those in the communicator used for matrix creation) MUST call this
5629:    routine, regardless of whether any rows being zeroed are owned by
5630:    them.

5632:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5633:    list only rows local to itself).

5635:    The option MAT_NO_OFF_PROC_ZERO_ROWS does not apply to this routine.

5637:    Level: intermediate

5639:    Concepts: matrices^zeroing rows

5641: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption(), MatZeroRowsColumns()
5642: @*/
5643: PetscErrorCode MatZeroRowsColumnsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5644: {
5646:   PetscInt       numRows;
5647:   const PetscInt *rows;

5654:   ISGetLocalSize(is,&numRows);
5655:   ISGetIndices(is,&rows);
5656:   MatZeroRowsColumns(mat,numRows,rows,diag,x,b);
5657:   ISRestoreIndices(is,&rows);
5658:   return(0);
5659: }

5663: /*@C
5664:    MatZeroRows - Zeros all entries (except possibly the main diagonal)
5665:    of a set of rows of a matrix.

5667:    Collective on Mat

5669:    Input Parameters:
5670: +  mat - the matrix
5671: .  numRows - the number of rows to remove
5672: .  rows - the global row indices
5673: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5674: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5675: -  b - optional vector of right hand side, that will be adjusted by provided solution

5677:    Notes:
5678:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5679:    but does not release memory.  For the dense and block diagonal
5680:    formats this does not alter the nonzero structure.

5682:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5683:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5684:    merely zeroed.

5686:    The user can set a value in the diagonal entry (or for the AIJ and
5687:    row formats can optionally remove the main diagonal entry from the
5688:    nonzero structure as well, by passing 0.0 as the final argument).

5690:    For the parallel case, all processes that share the matrix (i.e.,
5691:    those in the communicator used for matrix creation) MUST call this
5692:    routine, regardless of whether any rows being zeroed are owned by
5693:    them.

5695:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5696:    list only rows local to itself).

5698:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5699:    owns that are to be zeroed. This saves a global synchronization in the implementation.
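   Example of Usage:
   A sketch of imposing Dirichlet rows (the row indices and the vectors x and b are illustrative; x and b may also be NULL):
.vb
     PetscInt rows[] = {0,5,9};         /* global indices of the boundary rows */
     MatZeroRows(mat,3,rows,1.0,x,b);   /* keep 1.0 on the diagonal and adjust b from the known values in x */
.ve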

5701:    Level: intermediate

5703:    Concepts: matrices^zeroing rows

5705: .seealso: MatZeroRowsIS(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5706: @*/
5707: PetscErrorCode MatZeroRows(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
5708: {

5715:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
5716:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
5717:   if (!mat->ops->zerorows) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
5718:   MatCheckPreallocated(mat,1);

5720:   (*mat->ops->zerorows)(mat,numRows,rows,diag,x,b);
5721:   MatViewFromOptions(mat,NULL,"-mat_view");
5722:   PetscObjectStateIncrease((PetscObject)mat);
5723: #if defined(PETSC_HAVE_CUSP)
5724:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
5725:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
5726:   }
5727: #elif defined(PETSC_HAVE_VIENNACL)
5728:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
5729:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
5730:   }
5731: #elif defined(PETSC_HAVE_VECCUDA)
5732:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
5733:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
5734:   }
5735: #endif
5736:   return(0);
5737: }

5741: /*@C
5742:    MatZeroRowsIS - Zeros all entries (except possibly the main diagonal)
5743:    of a set of rows of a matrix.

5745:    Collective on Mat

5747:    Input Parameters:
5748: +  mat - the matrix
5749: .  is - index set of rows to remove
5750: .  diag - value put in all diagonals of eliminated rows
5751: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5752: -  b - optional vector of right hand side, that will be adjusted by provided solution

5754:    Notes:
5755:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5756:    but does not release memory.  For the dense and block diagonal
5757:    formats this does not alter the nonzero structure.

5759:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5760:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5761:    merely zeroed.

5763:    The user can set a value in the diagonal entry (or for the AIJ and
5764:    row formats can optionally remove the main diagonal entry from the
5765:    nonzero structure as well, by passing 0.0 as the final argument).

5767:    For the parallel case, all processes that share the matrix (i.e.,
5768:    those in the communicator used for matrix creation) MUST call this
5769:    routine, regardless of whether any rows being zeroed are owned by
5770:    them.

5772:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5773:    list only rows local to itself).

5775:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
5776:    owns that are to be zeroed. This saves a global synchronization in the implementation.

5778:    Level: intermediate

5780:    Concepts: matrices^zeroing rows

5782: .seealso: MatZeroRows(), MatZeroRowsStencil(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5783: @*/
5784: PetscErrorCode MatZeroRowsIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
5785: {
5786:   PetscInt       numRows;
5787:   const PetscInt *rows;

5794:   ISGetLocalSize(is,&numRows);
5795:   ISGetIndices(is,&rows);
5796:   MatZeroRows(mat,numRows,rows,diag,x,b);
5797:   ISRestoreIndices(is,&rows);
5798:   return(0);
5799: }

5803: /*@C
5804:    MatZeroRowsStencil - Zeros all entries (except possibly the main diagonal)
5805:    of a set of rows of a matrix. These rows must be local to the process.

5807:    Collective on Mat

5809:    Input Parameters:
5810: +  mat - the matrix
5811: .  numRows - the number of rows to remove
5812: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5813: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5814: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5815: -  b - optional vector of right hand side, that will be adjusted by provided solution

5817:    Notes:
5818:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5819:    but does not release memory.  For the dense and block diagonal
5820:    formats this does not alter the nonzero structure.

5822:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5823:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5824:    merely zeroed.

5826:    The user can set a value in the diagonal entry (or for the AIJ and
5827:    row formats can optionally remove the main diagonal entry from the
5828:    nonzero structure as well, by passing 0.0 as the final argument).

5830:    For the parallel case, all processes that share the matrix (i.e.,
5831:    those in the communicator used for matrix creation) MUST call this
5832:    routine, regardless of whether any rows being zeroed are owned by
5833:    them.

5835:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5836:    list only rows local to itself).

5838:    The grid coordinates are across the entire grid, not just the local portion

5840:    In Fortran idxm and idxn should be declared as
5841: $     MatStencil idxm(4,m)
5842:    and the values inserted using
5843: $    idxm(MatStencil_i,1) = i
5844: $    idxm(MatStencil_j,1) = j
5845: $    idxm(MatStencil_k,1) = k
5846: $    idxm(MatStencil_c,1) = c
5847:    etc

5849:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
5850:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5851:    etc., to obtain values that are obtained by wrapping the values from the left edge. This does not work for anything but the
5852:    DM_BOUNDARY_PERIODIC boundary type.

5854:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5855:    a single value per point) you can skip filling those indices.
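   Example of Usage:
   A sketch in C for a 2d grid with one degree of freedom per point; it assumes mat was obtained from a DMDA (e.g. via DMCreateMatrix()) so stencil information is attached, and the grid indices are illustrative:
.vb
     MatStencil row[2];
     PetscMemzero(row,sizeof(row));   /* zero the unused k and c slots */
     row[0].i = 0; row[0].j = 0;
     row[1].i = 1; row[1].j = 0;
     MatZeroRowsStencil(mat,2,row,1.0,NULL,NULL);
.ve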

5857:    Level: intermediate

5859:    Concepts: matrices^zeroing rows

5861: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5862: @*/
5863: PetscErrorCode MatZeroRowsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5864: {
5865:   PetscInt       dim     = mat->stencil.dim;
5866:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
5867:   PetscInt       *dims   = mat->stencil.dims+1;
5868:   PetscInt       *starts = mat->stencil.starts;
5869:   PetscInt       *dxm    = (PetscInt*) rows;
5870:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


5878:   PetscMalloc1(numRows, &jdxm);
5879:   for (i = 0; i < numRows; ++i) {
5880:     /* Skip unused dimensions (they are ordered k, j, i, c) */
5881:     for (j = 0; j < 3-sdim; ++j) dxm++;
5882:     /* Local index in X dir */
5883:     tmp = *dxm++ - starts[0];
5884:     /* Loop over remaining dimensions */
5885:     for (j = 0; j < dim-1; ++j) {
5886:       /* If nonlocal, set index to be negative */
5887:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5888:       /* Update local index */
5889:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5890:     }
5891:     /* Skip component slot if necessary */
5892:     if (mat->stencil.noc) dxm++;
5893:     /* Local row number */
5894:     if (tmp >= 0) {
5895:       jdxm[numNewRows++] = tmp;
5896:     }
5897:   }
5898:   MatZeroRowsLocal(mat,numNewRows,jdxm,diag,x,b);
5899:   PetscFree(jdxm);
5900:   return(0);
5901: }

5905: /*@C
5906:    MatZeroRowsColumnsStencil - Zeros all row and column entries (except possibly the main diagonal)
5907:    of a set of rows and columns of a matrix.

5909:    Collective on Mat

5911:    Input Parameters:
5912: +  mat - the matrix
5913: .  numRows - the number of rows/columns to remove
5914: .  rows - the grid coordinates (and component number when dof > 1) for matrix rows
5915: .  diag - value put in all diagonals of eliminated rows (0.0 will even eliminate diagonal entry)
5916: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
5917: -  b - optional vector of right hand side, that will be adjusted by provided solution

5919:    Notes:
5920:    For the AIJ and BAIJ matrix formats this removes the old nonzero structure,
5921:    but does not release memory.  For the dense and block diagonal
5922:    formats this does not alter the nonzero structure.

5924:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
5925:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
5926:    merely zeroed.

5928:    The user can set a value in the diagonal entry (or for the AIJ and
5929:    row formats can optionally remove the main diagonal entry from the
5930:    nonzero structure as well, by passing 0.0 as the final argument).

5932:    For the parallel case, all processes that share the matrix (i.e.,
5933:    those in the communicator used for matrix creation) MUST call this
5934:    routine, regardless of whether any rows being zeroed are owned by
5935:    them.

5937:    Each processor can indicate any rows in the entire matrix to be zeroed (i.e. each process does NOT have to
5938:    list only rows local to itself, but the row/column numbers are given in local numbering).

5940:    The grid coordinates are across the entire grid, not just the local portion

5942:    In Fortran idxm and idxn should be declared as
5943: $     MatStencil idxm(4,m)
5944:    and the values inserted using
5945: $    idxm(MatStencil_i,1) = i
5946: $    idxm(MatStencil_j,1) = j
5947: $    idxm(MatStencil_k,1) = k
5948: $    idxm(MatStencil_c,1) = c
5949:    etc

5951:    For periodic boundary conditions use negative indices for values to the left (below 0); these are
5952:    obtained by wrapping values from the right edge. For values to the right of the last entry, use that index plus one,
5953:    etc., to obtain values that are obtained by wrapping the values from the left edge. This does not work for anything but the
5954:    DM_BOUNDARY_PERIODIC boundary type.

5956:    For indices that don't mean anything for your case (like the k index when working in 2d, or the c index when you have
5957:    a single value per point) you can skip filling those indices.

5959:    Level: intermediate

5961:    Concepts: matrices^zeroing rows

5963: .seealso: MatZeroRows(), MatZeroRowsIS(), MatZeroEntries(), MatZeroRowsLocal(), MatSetOption()
5964: @*/
5965: PetscErrorCode MatZeroRowsColumnsStencil(Mat mat,PetscInt numRows,const MatStencil rows[],PetscScalar diag,Vec x,Vec b)
5966: {
5967:   PetscInt       dim     = mat->stencil.dim;
5968:   PetscInt       sdim    = dim - (1 - (PetscInt) mat->stencil.noc);
5969:   PetscInt       *dims   = mat->stencil.dims+1;
5970:   PetscInt       *starts = mat->stencil.starts;
5971:   PetscInt       *dxm    = (PetscInt*) rows;
5972:   PetscInt       *jdxm, i, j, tmp, numNewRows = 0;


5980:   PetscMalloc1(numRows, &jdxm);
5981:   for (i = 0; i < numRows; ++i) {
5982:     /* Skip unused dimensions (they are ordered k, j, i, c) */
5983:     for (j = 0; j < 3-sdim; ++j) dxm++;
5984:     /* Local index in X dir */
5985:     tmp = *dxm++ - starts[0];
5986:     /* Loop over remaining dimensions */
5987:     for (j = 0; j < dim-1; ++j) {
5988:       /* If nonlocal, set index to be negative */
5989:       if ((*dxm++ - starts[j+1]) < 0 || tmp < 0) tmp = PETSC_MIN_INT;
5990:       /* Update local index */
5991:       else tmp = tmp*dims[j] + *(dxm-1) - starts[j+1];
5992:     }
5993:     /* Skip component slot if necessary */
5994:     if (mat->stencil.noc) dxm++;
5995:     /* Local row number */
5996:     if (tmp >= 0) {
5997:       jdxm[numNewRows++] = tmp;
5998:     }
5999:   }
6000:   MatZeroRowsColumnsLocal(mat,numNewRows,jdxm,diag,x,b);
6001:   PetscFree(jdxm);
6002:   return(0);
6003: }

6007: /*@C
6008:    MatZeroRowsLocal - Zeros all entries (except possibly the main diagonal)
6009:    of a set of rows of a matrix; using local numbering of rows.

6011:    Collective on Mat

6013:    Input Parameters:
6014: +  mat - the matrix
6015: .  numRows - the number of rows to remove
6016: .  rows - the local row indices
6017: .  diag - value put in all diagonals of eliminated rows
6018: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6019: -  b - optional vector of right hand side, that will be adjusted by provided solution

6021:    Notes:
6022:    Before calling MatZeroRowsLocal(), the user must first set the
6023:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6025:    For the AIJ matrix formats this removes the old nonzero structure,
6026:    but does not release memory.  For the dense and block diagonal
6027:    formats this does not alter the nonzero structure.

6029:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6030:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6031:    merely zeroed.

6033:    The user can set a value in the diagonal entry (or for the AIJ and
6034:    row formats can optionally remove the main diagonal entry from the
6035:    nonzero structure as well, by passing 0.0 as the final argument).

6037:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6038:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6040:    Level: intermediate

6042:    Concepts: matrices^zeroing

6044: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6045: @*/
6046: PetscErrorCode MatZeroRowsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6047: {

6054:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6055:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6056:   MatCheckPreallocated(mat,1);

6058:   if (mat->ops->zerorowslocal) {
6059:     (*mat->ops->zerorowslocal)(mat,numRows,rows,diag,x,b);
6060:   } else {
6061:     IS             is, newis;
6062:     const PetscInt *newRows;

6064:     if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6065:     ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6066:     ISLocalToGlobalMappingApplyIS(mat->rmap->mapping,is,&newis);
6067:     ISGetIndices(newis,&newRows);
6068:     (*mat->ops->zerorows)(mat,numRows,newRows,diag,x,b);
6069:     ISRestoreIndices(newis,&newRows);
6070:     ISDestroy(&newis);
6071:     ISDestroy(&is);
6072:   }
6073:   PetscObjectStateIncrease((PetscObject)mat);
6074: #if defined(PETSC_HAVE_CUSP)
6075:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6076:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6077:   }
6078: #elif defined(PETSC_HAVE_VIENNACL)
6079:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6080:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6081:   }
6082: #elif defined(PETSC_HAVE_VECCUDA)
6083:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6084:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6085:   }
6086: #endif
6087:   return(0);
6088: }

6092: /*@C
6093:    MatZeroRowsLocalIS - Zeros all entries (except possibly the main diagonal)
6094:    of a set of rows of a matrix; using local numbering of rows.

6096:    Collective on Mat

6098:    Input Parameters:
6099: +  mat - the matrix
6100: .  is - index set of rows to remove
6101: .  diag - value put in all diagonals of eliminated rows
6102: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6103: -  b - optional vector of right hand side, that will be adjusted by provided solution

6105:    Notes:
6106:    Before calling MatZeroRowsLocalIS(), the user must first set the
6107:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6109:    For the AIJ matrix formats this removes the old nonzero structure,
6110:    but does not release memory.  For the dense and block diagonal
6111:    formats this does not alter the nonzero structure.

6113:    If the option MatSetOption(mat,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE) the nonzero structure
6114:    of the matrix is not changed (even for AIJ and BAIJ matrices) the values are
6115:    merely zeroed.

6117:    The user can set a value in the diagonal entry (or for the AIJ and
6118:    row formats can optionally remove the main diagonal entry from the
6119:    nonzero structure as well, by passing 0.0 as the final argument).

6121:    You can call MatSetOption(mat,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) if each process indicates only rows it
6122:    owns that are to be zeroed. This saves a global synchronization in the implementation.

6124:    Level: intermediate

6126:    Concepts: matrices^zeroing

6128: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6129: @*/
6130: PetscErrorCode MatZeroRowsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6131: {
6133:   PetscInt       numRows;
6134:   const PetscInt *rows;

6140:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6141:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6142:   MatCheckPreallocated(mat,1);

6144:   ISGetLocalSize(is,&numRows);
6145:   ISGetIndices(is,&rows);
6146:   MatZeroRowsLocal(mat,numRows,rows,diag,x,b);
6147:   ISRestoreIndices(is,&rows);
6148:   return(0);
6149: }

6153: /*@C
6154:    MatZeroRowsColumnsLocal - Zeros all entries (except possibly the main diagonal)
6155:    of a set of rows and columns of a matrix; using local numbering of rows.

6157:    Collective on Mat

6159:    Input Parameters:
6160: +  mat - the matrix
6161: .  numRows - the number of rows to remove
6162: .  rows - the local row indices
6163: .  diag - value put in all diagonals of eliminated rows
6164: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6165: -  b - optional vector of right hand side, that will be adjusted by provided solution

6167:    Notes:
6168:    Before calling MatZeroRowsColumnsLocal(), the user must first set the
6169:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6171:    The user can set a value in the diagonal entry (or for the AIJ and
6172:    row formats can optionally remove the main diagonal entry from the
6173:    nonzero structure as well, by passing 0.0 as the final argument).

6175:    Level: intermediate

6177:    Concepts: matrices^zeroing

6179: .seealso: MatZeroRows(), MatZeroRowsLocalIS(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6180: @*/
6181: PetscErrorCode MatZeroRowsColumnsLocal(Mat mat,PetscInt numRows,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
6182: {
6184:   IS             is, newis;
6185:   const PetscInt *newRows;

6191:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6192:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6193:   MatCheckPreallocated(mat,1);

6195:   if (!mat->cmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Need to provide local to global mapping to matrix first");
6196:   ISCreateGeneral(PETSC_COMM_SELF,numRows,rows,PETSC_COPY_VALUES,&is);
6197:   ISLocalToGlobalMappingApplyIS(mat->cmap->mapping,is,&newis);
6198:   ISGetIndices(newis,&newRows);
6199:   (*mat->ops->zerorowscolumns)(mat,numRows,newRows,diag,x,b);
6200:   ISRestoreIndices(newis,&newRows);
6201:   ISDestroy(&newis);
6202:   ISDestroy(&is);
6203:   PetscObjectStateIncrease((PetscObject)mat);
6204: #if defined(PETSC_HAVE_CUSP)
6205:   if (mat->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
6206:     mat->valid_GPU_matrix = PETSC_CUSP_CPU;
6207:   }
6208: #elif defined(PETSC_HAVE_VIENNACL)
6209:   if (mat->valid_GPU_matrix != PETSC_VIENNACL_UNALLOCATED) {
6210:     mat->valid_GPU_matrix = PETSC_VIENNACL_CPU;
6211:   }
6212: #elif defined(PETSC_HAVE_VECCUDA)
6213:   if (mat->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
6214:     mat->valid_GPU_matrix = PETSC_CUDA_CPU;
6215:   }
6216: #endif
6217:   return(0);
6218: }

6222: /*@C
6223:    MatZeroRowsColumnsLocalIS - Zeros all entries (except possibly the main diagonal)
6224:    of a set of rows and columns of a matrix; using local numbering of rows.

6226:    Collective on Mat

6228:    Input Parameters:
6229: +  mat - the matrix
6230: .  is - index set of rows to remove
6231: .  diag - value put in all diagonals of eliminated rows
6232: .  x - optional vector of solutions for zeroed rows (other entries in vector are not used)
6233: -  b - optional vector of right hand side, that will be adjusted by provided solution

6235:    Notes:
6236:    Before calling MatZeroRowsColumnsLocalIS(), the user must first set the
6237:    local-to-global mapping by calling MatSetLocalToGlobalMapping().

6239:    The user can set a value in the diagonal entry (or for the AIJ and
6240:    row formats can optionally remove the main diagonal entry from the
6241:    nonzero structure as well, by passing 0.0 as the final argument).

6243:    Level: intermediate

6245:    Concepts: matrices^zeroing

6247: .seealso: MatZeroRows(), MatZeroRowsLocal(), MatZeroEntries(), MatZeroRows(), MatSetLocalToGlobalMapping
6248: @*/
6249: PetscErrorCode MatZeroRowsColumnsLocalIS(Mat mat,IS is,PetscScalar diag,Vec x,Vec b)
6250: {
6252:   PetscInt       numRows;
6253:   const PetscInt *rows;

6259:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6260:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6261:   MatCheckPreallocated(mat,1);

6263:   ISGetLocalSize(is,&numRows);
6264:   ISGetIndices(is,&rows);
6265:   MatZeroRowsColumnsLocal(mat,numRows,rows,diag,x,b);
6266:   ISRestoreIndices(is,&rows);
6267:   return(0);
6268: }

6272: /*@
6273:    MatGetSize - Returns the numbers of rows and columns in a matrix.

6275:    Not Collective

6277:    Input Parameter:
6278: .  mat - the matrix

6280:    Output Parameters:
6281: +  m - the number of global rows
6282: -  n - the number of global columns

6284:    Note: both output parameters can be NULL on input.
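   Example of Usage:
   A small sketch (either output may be passed as NULL if it is not needed):
.vb
     PetscInt M,N;
     MatGetSize(mat,&M,&N);   /* M = number of global rows, N = number of global columns */
.ve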

6286:    Level: beginner

6288:    Concepts: matrices^size

6290: .seealso: MatGetLocalSize()
6291: @*/
6292: PetscErrorCode MatGetSize(Mat mat,PetscInt *m,PetscInt *n)
6293: {
6296:   if (m) *m = mat->rmap->N;
6297:   if (n) *n = mat->cmap->N;
6298:   return(0);
6299: }

6303: /*@
6304:    MatGetLocalSize - Returns the number of rows and columns in a matrix
6305:    stored locally.  This information may be implementation dependent, so
6306:    use with care.

6308:    Not Collective

6310:    Input Parameters:
6311: .  mat - the matrix

6313:    Output Parameters:
6314: +  m - the number of local rows
6315: -  n - the number of local columns

6317:    Note: both output parameters can be NULL on input.

6319:    Level: beginner

6321:    Concepts: matrices^local size

6323: .seealso: MatGetSize()
6324: @*/
6325: PetscErrorCode MatGetLocalSize(Mat mat,PetscInt *m,PetscInt *n)
6326: {
6331:   if (m) *m = mat->rmap->n;
6332:   if (n) *n = mat->cmap->n;
6333:   return(0);
6334: }
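
/*
   Editorial example (not part of the PETSc source): a minimal sketch contrasting
   MatGetSize() and MatGetLocalSize(). It assumes A is a matrix whose layout has been set;
   the helper name is hypothetical.
*/
static PetscErrorCode ReportMatrixSizes(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       M,N,m,n;

  ierr = MatGetSize(A,&M,&N);CHKERRQ(ierr);        /* global number of rows and columns */
  ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);   /* rows/columns stored on this process */
  ierr = PetscPrintf(PETSC_COMM_SELF,"global %D x %D, local %D x %D\n",M,N,m,n);CHKERRQ(ierr);
  return 0;
}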

6338: /*@
6339:    MatGetOwnershipRangeColumn - Returns the range of matrix columns that this processor "owns", that is, the columns of its
6340:    "diagonal block"; equivalently, the ownership range of a vector that this matrix can be multiplied against.

6342:    Not Collective, unless matrix has not been allocated, then collective on Mat

6344:    Input Parameters:
6345: .  mat - the matrix

6347:    Output Parameters:
6348: +  m - the global index of the first local column
6349: -  n - one more than the global index of the last local column

6351:    Notes: both output parameters can be NULL on input.

6353:    Level: developer

6355:    Concepts: matrices^column ownership

6357: .seealso:  MatGetOwnershipRange(), MatGetOwnershipRanges(), MatGetOwnershipRangesColumn()

6359: @*/
6360: PetscErrorCode MatGetOwnershipRangeColumn(Mat mat,PetscInt *m,PetscInt *n)
6361: {
6367:   MatCheckPreallocated(mat,1);
6368:   if (m) *m = mat->cmap->rstart;
6369:   if (n) *n = mat->cmap->rend;
6370:   return(0);
6371: }

6375: /*@
6376:    MatGetOwnershipRange - Returns the range of matrix rows owned by
6377:    this processor, assuming that the matrix is laid out with the first
6378:    n1 rows on the first processor, the next n2 rows on the second, etc.
6379:    For certain parallel layouts this range may not be well defined.

6381:    Not Collective

6383:    Input Parameters:
6384: .  mat - the matrix

6386:    Output Parameters:
6387: +  m - the global index of the first local row
6388: -  n - one more than the global index of the last local row

6390:    Note: Both output parameters can be NULL on input.
6391: $  This function requires that the matrix be preallocated. If you have not preallocated, consider using
6392: $    PetscSplitOwnership(MPI_Comm comm, PetscInt *n, PetscInt *N)
6393: $  and then MPI_Scan() to calculate prefix sums of the local sizes.

6395:    Level: beginner

6397:    Concepts: matrices^row ownership

6399: .seealso:   MatGetOwnershipRanges(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn(), PetscSplitOwnership(), PetscSplitOwnershipBlock()

6401: @*/
6402: PetscErrorCode MatGetOwnershipRange(Mat mat,PetscInt *m,PetscInt *n)
6403: {
6409:   MatCheckPreallocated(mat,1);
6410:   if (m) *m = mat->rmap->rstart;
6411:   if (n) *n = mat->rmap->rend;
6412:   return(0);
6413: }
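
/*
   Editorial example (not part of the PETSc source): a minimal sketch of looping over the
   locally owned rows returned by MatGetOwnershipRange(). It assumes A has been preallocated
   so that the diagonal locations may be set; the helper name is hypothetical.
*/
static PetscErrorCode SetLocalDiagonalToOne(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend,i;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {                       /* global indices of the locally owned rows */
    ierr = MatSetValue(A,i,i,1.0,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return 0;
}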

6417: /*@C
6418:    MatGetOwnershipRanges - Returns the range of matrix rows owned by
6419:    each process

6421:    Not Collective, unless matrix has not been allocated, then collective on Mat

6423:    Input Parameters:
6424: .  mat - the matrix

6426:    Output Parameters:
6427: .  ranges - start of each processor's portion plus one more than the total length at the end

6429:    Level: beginner

6431:    Concepts: matrices^row ownership

6433: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRangesColumn()

6435: @*/
6436: PetscErrorCode MatGetOwnershipRanges(Mat mat,const PetscInt **ranges)
6437: {

6443:   MatCheckPreallocated(mat,1);
6444:   PetscLayoutGetRanges(mat->rmap,ranges);
6445:   return(0);
6446: }

6450: /*@C
6451:    MatGetOwnershipRangesColumn - Returns, for each process, the range of matrix columns that process "owns", that is, the columns of
6452:    its "diagonal block"; equivalently, the ownership ranges of a vector that this matrix can be multiplied against.

6454:    Not Collective, unless matrix has not been allocated, then collective on Mat

6456:    Input Parameters:
6457: .  mat - the matrix

6459:    Output Parameters:
6460: .  ranges - start of each processor's portion plus one more than the total length at the end

6462:    Level: beginner

6464:    Concepts: matrices^column ownership

6466: .seealso:   MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatGetOwnershipRanges()

6468: @*/
6469: PetscErrorCode MatGetOwnershipRangesColumn(Mat mat,const PetscInt **ranges)
6470: {

6476:   MatCheckPreallocated(mat,1);
6477:   PetscLayoutGetRanges(mat->cmap,ranges);
6478:   return(0);
6479: }

6483: /*@C
6484:    MatGetOwnershipIS - Get row and column ownership as index sets

6486:    Not Collective

6488:    Input Arguments:
6489: .  A - matrix of type Elemental

6491:    Output Arguments:
6492: +  rows - rows in which this process owns elements
6493: -  cols - columns in which this process owns elements

6495:    Level: intermediate

6497: .seealso: MatGetOwnershipRange(), MatGetOwnershipRangeColumn(), MatSetValues(), MATELEMENTAL
6498: @*/
6499: PetscErrorCode MatGetOwnershipIS(Mat A,IS *rows,IS *cols)
6500: {
6501:   PetscErrorCode ierr,(*f)(Mat,IS*,IS*);

6504:   MatCheckPreallocated(A,1);
6505:   PetscObjectQueryFunction((PetscObject)A,"MatGetOwnershipIS_C",&f);
6506:   if (f) {
6507:     (*f)(A,rows,cols);
6508:   } else {   /* Create a standard row-based partition, each process is responsible for ALL columns in their row block */
6509:     if (rows) {ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,rows);}
6510:     if (cols) {ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,cols);}
6511:   }
6512:   return(0);
6513: }

6517: /*@C
6518:    MatILUFactorSymbolic - Performs symbolic ILU factorization of a matrix.
6519:    Uses levels of fill only, not drop tolerance. Use MatLUFactorNumeric()
6520:    to complete the factorization.

6522:    Collective on Mat

6524:    Input Parameters:
6525: +  mat - the matrix
6526: .  row - row permutation
6527: .  column - column permutation
6528: -  info - structure containing
6529: $      levels - number of levels of fill.
6530: $      expected fill - as ratio of original fill.
6531: $      1 or 0 - indicating whether to force fill on the diagonal (improves robustness for matrices
6532:                 missing diagonal entries)

6534:    Output Parameters:
6535: .  fact - new matrix that has been symbolically factored

6537:    Notes: See Users-Manual: ch_mat for additional information about choosing the fill factor for better efficiency.

6539:    Most users should employ the simplified KSP interface for linear solvers
6540:    instead of working directly with matrix algebra routines such as this.
6541:    See, e.g., KSPCreate().

6543:    Level: developer

6545:   Concepts: matrices^symbolic LU factorization
6546:   Concepts: matrices^factorization
6547:   Concepts: LU^symbolic factorization

6549: .seealso: MatLUFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()
6550:           MatGetOrdering(), MatFactorInfo

6552:     Developer Note: fortran interface is not autogenerated as the f90
6553:     interface definition cannot be generated correctly [due to MatFactorInfo]

6555: @*/
6556: PetscErrorCode MatILUFactorSymbolic(Mat fact,Mat mat,IS row,IS col,const MatFactorInfo *info)
6557: {

6567:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels of fill negative %D",(PetscInt)info->levels);
6568:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6569:   if (!(fact)->ops->ilufactorsymbolic) {
6570:     const MatSolverPackage spackage;
6571:     MatFactorGetSolverPackage(fact,&spackage);
6572:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ILU using solver package %s",((PetscObject)mat)->type_name,spackage);
6573:   }
6574:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6575:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6576:   MatCheckPreallocated(mat,2);

6578:   PetscLogEventBegin(MAT_ILUFactorSymbolic,mat,row,col,0);
6579:   (fact->ops->ilufactorsymbolic)(fact,mat,row,col,info);
6580:   PetscLogEventEnd(MAT_ILUFactorSymbolic,mat,row,col,0);
6581:   return(0);
6582: }
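
/*
   Editorial example (not part of the PETSc source): a minimal sketch of the symbolic/numeric
   ILU sequence described above, using PETSc's built-in solver on a sequential AIJ matrix.
   It assumes A is assembled; the helper name is hypothetical.
*/
static PetscErrorCode FactorILU0(Mat A,Mat *F)
{
  PetscErrorCode ierr;
  IS             row,col;
  MatFactorInfo  info;

  ierr = MatGetFactor(A,MATSOLVERPETSC,MAT_FACTOR_ILU,F);CHKERRQ(ierr);
  ierr = MatGetOrdering(A,MATORDERINGNATURAL,&row,&col);CHKERRQ(ierr);
  ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
  info.levels = 0;                 /* ILU(0): no fill beyond the original nonzero pattern */
  info.fill   = 1.0;               /* expected fill as a ratio of the original fill */
  ierr = MatILUFactorSymbolic(*F,A,row,col,&info);CHKERRQ(ierr);
  ierr = MatLUFactorNumeric(*F,A,&info);CHKERRQ(ierr);
  ierr = ISDestroy(&row);CHKERRQ(ierr);
  ierr = ISDestroy(&col);CHKERRQ(ierr);
  return 0;
}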

6586: /*@C
6587:    MatICCFactorSymbolic - Performs symbolic incomplete
6588:    Cholesky factorization for a symmetric matrix.  Use
6589:    MatCholeskyFactorNumeric() to complete the factorization.

6591:    Collective on Mat

6593:    Input Parameters:
6594: +  mat - the matrix
6595: .  perm - row and column permutation
6596: -  info - structure containing
6597: $      levels - number of levels of fill.
6598: $      expected fill - as ratio of original fill.

6600:    Output Parameter:
6601: .  fact - the factored matrix

6603:    Notes:
6604:    Most users should employ the KSP interface for linear solvers
6605:    instead of working directly with matrix algebra routines such as this.
6606:    See, e.g., KSPCreate().

6608:    Level: developer

6610:   Concepts: matrices^symbolic incomplete Cholesky factorization
6611:   Concepts: matrices^factorization
6612:   Concepts: Cholesky^symbolic factorization

6614: .seealso: MatCholeskyFactorNumeric(), MatCholeskyFactor(), MatFactorInfo

6616:     Developer Note: fortran interface is not autogenerated as the f90
6617:     interface definition cannot be generated correctly [due to MatFactorInfo]

6619: @*/
6620: PetscErrorCode MatICCFactorSymbolic(Mat fact,Mat mat,IS perm,const MatFactorInfo *info)
6621: {

6630:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6631:   if (info->levels < 0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Levels negative %D",(PetscInt) info->levels);
6632:   if (info->fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Expected fill less than 1.0 %g",(double)info->fill);
6633:   if (!(fact)->ops->iccfactorsymbolic) {
6634:     const MatSolverPackage spackage;
6635:     MatFactorGetSolverPackage(fact,&spackage);
6636:     SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Matrix type %s symbolic ICC using solver package %s",((PetscObject)mat)->type_name,spackage);
6637:   }
6638:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6639:   MatCheckPreallocated(mat,2);

6641:   PetscLogEventBegin(MAT_ICCFactorSymbolic,mat,perm,0,0);
6642:   (fact->ops->iccfactorsymbolic)(fact,mat,perm,info);
6643:   PetscLogEventEnd(MAT_ICCFactorSymbolic,mat,perm,0,0);
6644:   return(0);
6645: }

6649: /*@C
6650:    MatGetSubMatrices - Extracts several submatrices from a matrix. If submat
6651:    points to an array of valid matrices, they may be reused to store the new
6652:    submatrices.

6654:    Collective on Mat

6656:    Input Parameters:
6657: +  mat - the matrix
6658: .  n   - the number of submatrices to be extracted (on this processor, may be zero)
6659: .  irow, icol - index sets of rows and columns to extract
6660: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

6662:    Output Parameter:
6663: .  submat - the array of submatrices

6665:    Notes:
6666:    MatGetSubMatrices() can extract ONLY sequential submatrices
6667:    (from both sequential and parallel matrices). Use MatGetSubMatrix()
6668:    to extract a parallel submatrix.

6670:    Some matrix types place restrictions on the row and column
6671:    indices, such as that they be sorted or that they be equal to each other.

6673:    The index sets may not have duplicate entries.

6675:    When extracting submatrices from a parallel matrix, each processor can
6676:    form a different submatrix by setting the rows and columns of its
6677:    individual index sets according to the local submatrix desired.

6679:    When finished using the submatrices, the user should destroy
6680:    them with MatDestroyMatrices().

6682:    MAT_REUSE_MATRIX can only be used when the nonzero structure of the
6683:    original matrix has not changed since the last call to MatGetSubMatrices().

6685:    This routine creates the matrices in submat; you should NOT create them before
6686:    calling it. It also allocates the array of matrix pointers submat.

6688:    For BAIJ matrices the index sets must respect the block structure, that is if they
6689:    request one row/column in a block, they must request all rows/columns that are in
6690:    that block. For example, if the block size is 2 you cannot request just row 0 and
6691:    column 0.

6693:    Fortran Note:
6694:    The Fortran interface is slightly different from that given below; it
6695:    requires one to pass in as submat a Mat (integer) array of size at least n.

6697:    Level: advanced

6699:    Concepts: matrices^accessing submatrices
6700:    Concepts: submatrices

6702: .seealso: MatDestroyMatrices(), MatGetSubMatrix(), MatGetRow(), MatGetDiagonal(), MatReuse
6703: @*/
6704: PetscErrorCode MatGetSubMatrices(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6705: {
6707:   PetscInt       i;
6708:   PetscBool      eq;

6713:   if (n) {
6718:   }
6720:   if (n && scall == MAT_REUSE_MATRIX) {
6723:   }
6724:   if (!mat->ops->getsubmatrices) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6725:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6726:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6727:   MatCheckPreallocated(mat,1);

6729:   PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6730:   (*mat->ops->getsubmatrices)(mat,n,irow,icol,scall,submat);
6731:   PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6732:   for (i=0; i<n; i++) {
6733:     (*submat)[i]->factortype = MAT_FACTOR_NONE;  /* in case in place factorization was previously done on submatrix */
6734:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6735:       ISEqual(irow[i],icol[i],&eq);
6736:       if (eq) {
6737:         if (mat->symmetric) {
6738:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6739:         } else if (mat->hermitian) {
6740:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6741:         } else if (mat->structurally_symmetric) {
6742:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6743:         }
6744:       }
6745:     }
6746:   }
6747:   return(0);
6748: }
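
/*
   Editorial example (not part of the PETSc source): a minimal sketch of MatGetSubMatrices()
   in which each process extracts a sequential submatrix made of its locally owned rows and
   all columns, then frees it with MatDestroyMatrices(). The helper name is hypothetical.
*/
static PetscErrorCode ExtractLocalRowBlock(Mat A)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend,N;
  IS             irow,icol;
  Mat            *submat;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  ierr = MatGetSize(A,NULL,&N);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&irow);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,N,0,1,&icol);CHKERRQ(ierr);
  ierr = MatGetSubMatrices(A,1,&irow,&icol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
  /* ... use submat[0], a sequential matrix local to this process ... */
  ierr = MatDestroyMatrices(1,&submat);CHKERRQ(ierr);
  ierr = ISDestroy(&irow);CHKERRQ(ierr);
  ierr = ISDestroy(&icol);CHKERRQ(ierr);
  return 0;
}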

6752: PetscErrorCode MatGetSubMatricesMPI(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
6753: {
6755:   PetscInt       i;
6756:   PetscBool      eq;

6761:   if (n) {
6766:   }
6768:   if (n && scall == MAT_REUSE_MATRIX) {
6771:   }
6772:   if (!mat->ops->getsubmatricesmpi) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6773:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6774:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6775:   MatCheckPreallocated(mat,1);

6777:   PetscLogEventBegin(MAT_GetSubMatrices,mat,0,0,0);
6778:   (*mat->ops->getsubmatricesmpi)(mat,n,irow,icol,scall,submat);
6779:   PetscLogEventEnd(MAT_GetSubMatrices,mat,0,0,0);
6780:   for (i=0; i<n; i++) {
6781:     if (mat->symmetric || mat->structurally_symmetric || mat->hermitian) {
6782:       ISEqual(irow[i],icol[i],&eq);
6783:       if (eq) {
6784:         if (mat->symmetric) {
6785:           MatSetOption((*submat)[i],MAT_SYMMETRIC,PETSC_TRUE);
6786:         } else if (mat->hermitian) {
6787:           MatSetOption((*submat)[i],MAT_HERMITIAN,PETSC_TRUE);
6788:         } else if (mat->structurally_symmetric) {
6789:           MatSetOption((*submat)[i],MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);
6790:         }
6791:       }
6792:     }
6793:   }
6794:   return(0);
6795: }

6799: /*@C
6800:    MatDestroyMatrices - Destroys a set of matrices obtained with MatGetSubMatrices().

6802:    Collective on Mat

6804:    Input Parameters:
6805: +  n - the number of local matrices
6806: -  mat - the matrices (note that this is a pointer to the array of matrices, just to match the calling
6807:                        sequence of MatGetSubMatrices())

6809:    Level: advanced

6811:     Notes: Frees not only the matrices, but also the array that contains them.
6812:            In Fortran the array itself is not freed.

6814: .seealso: MatGetSubMatrices()
6815: @*/
6816: PetscErrorCode MatDestroyMatrices(PetscInt n,Mat *mat[])
6817: {
6819:   PetscInt       i;

6822:   if (!*mat) return(0);
6823:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Trying to destroy negative number of matrices %D",n);
6825:   for (i=0; i<n; i++) {
6826:     MatDestroy(&(*mat)[i]);
6827:   }
6828:   /* memory is allocated even if n = 0 */
6829:   PetscFree(*mat);
6830:   *mat = NULL;
6831:   return(0);
6832: }

6836: /*@C
6837:    MatGetSeqNonzeroStructure - Extracts the sequential nonzero structure from a matrix.

6839:    Collective on Mat

6841:    Input Parameters:
6842: .  mat - the matrix

6844:    Output Parameter:
6845: .  matstruct - the sequential matrix with the nonzero structure of mat

6847:   Level: intermediate

6849: .seealso: MatDestroySeqNonzeroStructure(), MatGetSubMatrices(), MatDestroyMatrices()
6850: @*/
6851: PetscErrorCode MatGetSeqNonzeroStructure(Mat mat,Mat *matstruct)
6852: {


6860:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6861:   MatCheckPreallocated(mat,1);

6863:   if (!mat->ops->getseqnonzerostructure) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Not for matrix type %s\n",((PetscObject)mat)->type_name);
6864:   PetscLogEventBegin(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6865:   (*mat->ops->getseqnonzerostructure)(mat,matstruct);
6866:   PetscLogEventEnd(MAT_GetSeqNonzeroStructure,mat,0,0,0);
6867:   return(0);
6868: }

6872: /*@C
6873:    MatDestroySeqNonzeroStructure - Destroys matrix obtained with MatGetSeqNonzeroStructure().

6875:    Collective on Mat

6877:    Input Parameters:
6878: .  mat - the matrix (note that this is a pointer to the matrix, just to match the calling
6879:                        sequence of MatGetSeqNonzeroStructure())

6881:    Level: advanced

6883:     Notes: Frees the matrix obtained with MatGetSeqNonzeroStructure()

6885: .seealso: MatGetSeqNonzeroStructure()
6886: @*/
6887: PetscErrorCode MatDestroySeqNonzeroStructure(Mat *mat)
6888: {

6893:   MatDestroy(mat);
6894:   return(0);
6895: }

6899: /*@
6900:    MatIncreaseOverlap - Given a set of submatrices indicated by index sets,
6901:    replaces the index sets by larger ones that represent submatrices with
6902:    additional overlap.

6904:    Collective on Mat

6906:    Input Parameters:
6907: +  mat - the matrix
6908: .  n   - the number of index sets
6909: .  is  - the array of index sets (these index sets will be changed during the call)
6910: -  ov  - the additional overlap requested

6912:    Options Database:
6913: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

6915:    Level: developer

6917:    Concepts: overlap
6918:    Concepts: ASM^computing overlap

6920: .seealso: MatGetSubMatrices()
6921: @*/
6922: PetscErrorCode MatIncreaseOverlap(Mat mat,PetscInt n,IS is[],PetscInt ov)
6923: {

6929:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6930:   if (n) {
6933:   }
6934:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6935:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6936:   MatCheckPreallocated(mat,1);

6938:   if (!ov) return(0);
6939:   if (!mat->ops->increaseoverlap) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
6940:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6941:   (*mat->ops->increaseoverlap)(mat,n,is,ov);
6942:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6943:   return(0);
6944: }
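
/*
   Editorial example (not part of the PETSc source): a minimal sketch of growing a local
   subdomain by one level of overlap, as an additive Schwarz setup might do. It assumes A is
   an assembled parallel matrix; the helper name is hypothetical.
*/
static PetscErrorCode GrowLocalDomainByOne(Mat A,IS *domain)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,domain);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap(A,1,domain,1);CHKERRQ(ierr);   /* *domain is replaced by the enlarged IS */
  return 0;
}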


6947: PetscErrorCode MatIncreaseOverlapSplit_Single(Mat,IS*,PetscInt);

6951: /*@
6952:    MatIncreaseOverlapSplit - Given a set of submatrices indicated by index sets across
6953:    a sub communicator, replaces the index sets by larger ones that represent submatrices with
6954:    additional overlap.

6956:    Collective on Mat

6958:    Input Parameters:
6959: +  mat - the matrix
6960: .  n   - the number of index sets
6961: .  is  - the array of index sets (these index sets will be changed during the call)
6962: -  ov  - the additional overlap requested

6964:    Options Database:
6965: .  -mat_increase_overlap_scalable - use a scalable algorithm to compute the overlap (supported by MPIAIJ matrix)

6967:    Level: developer

6969:    Concepts: overlap
6970:    Concepts: ASM^computing overlap

6972: .seealso: MatGetSubMatrices()
6973: @*/
6974: PetscErrorCode MatIncreaseOverlapSplit(Mat mat,PetscInt n,IS is[],PetscInt ov)
6975: {
6976:   PetscInt       i;

6982:   if (n < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must have one or more domains, you have %D",n);
6983:   if (n) {
6986:   }
6987:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
6988:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
6989:   MatCheckPreallocated(mat,1);
6990:   if (!ov) return(0);
6991:   PetscLogEventBegin(MAT_IncreaseOverlap,mat,0,0,0);
6992:   for (i=0; i<n; i++) {
6993:     MatIncreaseOverlapSplit_Single(mat,&is[i],ov);
6994:   }
6995:   PetscLogEventEnd(MAT_IncreaseOverlap,mat,0,0,0);
6996:   return(0);
6997: }




7004: /*@
7005:    MatGetBlockSize - Returns the matrix block size.

7007:    Not Collective

7009:    Input Parameter:
7010: .  mat - the matrix

7012:    Output Parameter:
7013: .  bs - block size

7015:    Notes:
7016:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.

7018:    If the block size has not been set yet this routine returns 1.

7020:    Level: intermediate

7022:    Concepts: matrices^block size

7024: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSizes()
7025: @*/
7026: PetscErrorCode MatGetBlockSize(Mat mat,PetscInt *bs)
7027: {
7031:   *bs = PetscAbs(mat->rmap->bs);
7032:   return(0);
7033: }

7037: /*@
7038:    MatGetBlockSizes - Returns the matrix block row and column sizes.

7040:    Not Collective

7042:    Input Parameter:
7043: .  mat - the matrix

7045:    Output Parameters:
7046: +  rbs - row block size
7047: -  cbs - column block size

7049:    Notes:
7050:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7051:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.

7053:    If a block size has not been set yet this routine returns 1.

7055:    Level: intermediate

7057:    Concepts: matrices^block size

7059: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatSetBlockSizes()
7060: @*/
7061: PetscErrorCode MatGetBlockSizes(Mat mat,PetscInt *rbs, PetscInt *cbs)
7062: {
7067:   if (rbs) *rbs = PetscAbs(mat->rmap->bs);
7068:   if (cbs) *cbs = PetscAbs(mat->cmap->bs);
7069:   return(0);
7070: }

7074: /*@
7075:    MatSetBlockSize - Sets the matrix block size.

7077:    Logically Collective on Mat

7079:    Input Parameters:
7080: +  mat - the matrix
7081: -  bs - block size

7083:    Notes:
7084:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.

7086:      This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later

7088:    Level: intermediate

7090:    Concepts: matrices^block size

7092: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes(), MatGetBlockSizes()
7093: @*/
7094: PetscErrorCode MatSetBlockSize(Mat mat,PetscInt bs)
7095: {

7101:   PetscLayoutSetBlockSize(mat->rmap,bs);
7102:   PetscLayoutSetBlockSize(mat->cmap,bs);
7103:   return(0);
7104: }
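
/*
   Editorial example (not part of the PETSc source): a minimal sketch showing that
   MatSetBlockSize() must precede MatSetUp()/preallocation. It creates a hypothetical BAIJ
   matrix with 2x2 blocks; the helper name is hypothetical.
*/
static PetscErrorCode CreateBlockMatrix(MPI_Comm comm,PetscInt nblocks,Mat *A)
{
  PetscErrorCode ierr;

  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,2*nblocks,2*nblocks);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATBAIJ);CHKERRQ(ierr);
  ierr = MatSetBlockSize(*A,2);CHKERRQ(ierr);   /* must be called before MatSetUp() or preallocation */
  ierr = MatSetUp(*A);CHKERRQ(ierr);
  return 0;
}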

7108: /*@
7109:    MatSetBlockSizes - Sets the matrix block row and column sizes.

7111:    Logically Collective on Mat

7113:    Input Parameters:
7114: +  mat - the matrix
7115: .  rbs - row block size
7116: -  cbs - column block size

7118:    Notes:
7119:     Block row formats are MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ. These formats ALWAYS have square block storage in the matrix.
7120:     If you pass a different block size for the columns than the rows, the row block size determines the square block storage.

7122:     This must be called before MatSetUp() or MatXXXSetPreallocation() (or will default to 1) and the block size cannot be changed later

7124:     The row and column block size determine the blocksize of the "row" and "column" vectors returned by MatCreateVecs().

7126:    Level: intermediate

7128:    Concepts: matrices^block size

7130: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSize(), MatGetBlockSizes()
7131: @*/
7132: PetscErrorCode MatSetBlockSizes(Mat mat,PetscInt rbs,PetscInt cbs)
7133: {

7140:   PetscLayoutSetBlockSize(mat->rmap,rbs);
7141:   PetscLayoutSetBlockSize(mat->cmap,cbs);
7142:   return(0);
7143: }

7147: /*@
7148:    MatSetBlockSizesFromMats - Sets the matrix block row and column sizes to match a pair of matrices

7150:    Logically Collective on Mat

7152:    Input Parameters:
7153: +  mat - the matrix
7154: .  fromRow - matrix from which to copy row block size
7155: -  fromCol - matrix from which to copy column block size (can be same as fromRow)

7157:    Level: developer

7159:    Concepts: matrices^block size

7161: .seealso: MatCreateSeqBAIJ(), MatCreateBAIJ(), MatGetBlockSize(), MatSetBlockSizes()
7162: @*/
7163: PetscErrorCode MatSetBlockSizesFromMats(Mat mat,Mat fromRow,Mat fromCol)
7164: {

7171:   if (fromRow->rmap->bs > 0) {PetscLayoutSetBlockSize(mat->rmap,fromRow->rmap->bs);}
7172:   if (fromCol->cmap->bs > 0) {PetscLayoutSetBlockSize(mat->cmap,fromCol->cmap->bs);}
7173:   return(0);
7174: }

7178: /*@
7179:    MatResidual - Default routine to calculate the residual.

7181:    Collective on Mat and Vec

7183:    Input Parameters:
7184: +  mat - the matrix
7185: .  b   - the right-hand-side
7186: -  x   - the approximate solution

7188:    Output Parameter:
7189: .  r - location to store the residual

7191:    Level: developer

7193: .keywords: MG, default, multigrid, residual

7195: .seealso: PCMGSetResidual()
7196: @*/
7197: PetscErrorCode MatResidual(Mat mat,Vec b,Vec x,Vec r)
7198: {

7207:   MatCheckPreallocated(mat,1);
7208:   PetscLogEventBegin(MAT_Residual,mat,0,0,0);
7209:   if (!mat->ops->residual) {
7210:     MatMult(mat,x,r);
7211:     VecAYPX(r,-1.0,b);
7212:   } else {
7213:     (*mat->ops->residual)(mat,b,x,r);
7214:   }
7215:   PetscLogEventEnd(MAT_Residual,mat,0,0,0);
7216:   return(0);
7217: }
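
/*
   Editorial example (not part of the PETSc source): a minimal sketch that uses MatResidual()
   to form r = b - A*x and report its 2-norm. It assumes A, b, x are compatible and assembled;
   the helper name is hypothetical.
*/
static PetscErrorCode ReportResidualNorm(Mat A,Vec b,Vec x)
{
  PetscErrorCode ierr;
  Vec            r;
  PetscReal      norm;

  ierr = VecDuplicate(b,&r);CHKERRQ(ierr);
  ierr = MatResidual(A,b,x,r);CHKERRQ(ierr);       /* r = b - A*x */
  ierr = VecNorm(r,NORM_2,&norm);CHKERRQ(ierr);
  ierr = PetscPrintf(PetscObjectComm((PetscObject)A),"residual norm %g\n",(double)norm);CHKERRQ(ierr);
  ierr = VecDestroy(&r);CHKERRQ(ierr);
  return 0;
}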

7221: /*@C
7222:     MatGetRowIJ - Returns the compressed row storage i and j indices for sequential matrices.

7224:    Collective on Mat

7226:     Input Parameters:
7227: +   mat - the matrix
7228: .   shift -  0 or 1 indicating we want the indices starting at 0 or 1
7229: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be symmetrized
7230: -   inodecompressed - PETSC_TRUE or PETSC_FALSE  indicating if the nonzero structure of the
7231:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7232:                  always used.

7234:     Output Parameters:
7235: +   n - number of rows in the (possibly compressed) matrix
7236: .   ia - the row pointers [of length n+1]
7237: .   ja - the column indices
7238: -   done - indicates if the routine actually worked and returned appropriate ia[] and ja[] arrays; callers
7239:            are responsible for handling the case when done == PETSC_FALSE and ia and ja are not set

7241:     Level: developer

7243:     Notes: You CANNOT change any of the ia[] or ja[] values.

7245:            Use MatRestoreRowIJ() when you are finished accessing the ia[] and ja[] values

7247:     Fortran Note:

7249:            In Fortran use
7250: $           PetscInt ia(1), ja(1)
7251: $           PetscOffset iia, jja
7252: $      call MatGetRowIJ(mat,shift,symmetric,inodecompressed,n,ia,iia,ja,jja,done,ierr)
7253: $      Access the ith and jth entries via ia(iia + i) and ja(jja + j)
7254: $
7255: $          or
7256: $
7257: $           PetscInt, pointer :: ia(:),ja(:)
7258: $    call  MatGetRowIJF90(mat,shift,symmetric,inodecompressed,n,ia,ja,done,ierr)
7259: $      Access the ith and jth entries via ia(i) and ja(j)



7263: .seealso: MatGetColumnIJ(), MatRestoreRowIJ(), MatSeqAIJGetArray()
7264: @*/
7265: PetscErrorCode MatGetRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7266: {

7276:   MatCheckPreallocated(mat,1);
7277:   if (!mat->ops->getrowij) *done = PETSC_FALSE;
7278:   else {
7279:     *done = PETSC_TRUE;
7280:     PetscLogEventBegin(MAT_GetRowIJ,mat,0,0,0);
7281:     (*mat->ops->getrowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7282:     PetscLogEventEnd(MAT_GetRowIJ,mat,0,0,0);
7283:   }
7284:   return(0);
7285: }
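
/*
   Editorial example (not part of the PETSc source): a minimal sketch of the
   MatGetRowIJ()/MatRestoreRowIJ() pairing on a sequential (e.g. SeqAIJ) matrix,
   checking the done flag as required. The helper name is hypothetical.
*/
static PetscErrorCode CountNonzerosViaCSR(Mat A,PetscInt *nz)
{
  PetscErrorCode ierr;
  PetscInt       n;
  const PetscInt *ia,*ja;
  PetscBool      done;

  ierr = MatGetRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix type does not provide row IJ access");
  *nz = ia[n];                                   /* ia has n+1 entries; the last is the nonzero count */
  ierr = MatRestoreRowIJ(A,0,PETSC_FALSE,PETSC_FALSE,&n,&ia,&ja,&done);CHKERRQ(ierr);
  return 0;
}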

7289: /*@C
7290:     MatGetColumnIJ - Returns the compressed column storage i and j indices for sequential matrices.

7292:     Collective on Mat

7294:     Input Parameters:
7295: +   mat - the matrix
7296: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7297: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7298:                 symmetrized
7299: .   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7300:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7301:                  always used.
7302: .   n - number of columns in the (possibly compressed) matrix
7303: .   ia - the column pointers
7304: -   ja - the row indices

7306:     Output Parameters:
7307: .   done - PETSC_TRUE or PETSC_FALSE, indicating whether the values have been returned

7309:     Note:
7310:     This routine zeros out n, ia, and ja. This is to prevent accidental
7311:     use of the array after it has been restored. If you pass NULL, it will
7312:     not zero the pointers.  Use of ia or ja after MatRestoreColumnIJ() is invalid.

7314:     Level: developer

7316: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7317: @*/
7318: PetscErrorCode MatGetColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7319: {

7329:   MatCheckPreallocated(mat,1);
7330:   if (!mat->ops->getcolumnij) *done = PETSC_FALSE;
7331:   else {
7332:     *done = PETSC_TRUE;
7333:     (*mat->ops->getcolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7334:   }
7335:   return(0);
7336: }

7340: /*@C
7341:     MatRestoreRowIJ - Call after you are completed with the ia,ja indices obtained with
7342:     MatGetRowIJ().

7344:     Collective on Mat

7346:     Input Parameters:
7347: +   mat - the matrix
7348: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7349: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7350:                 symmetrized
7351: .   inodecompressed -  PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7352:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7353:                  always used.
7354: .   n - size of (possibly compressed) matrix
7355: .   ia - the row pointers
7356: -   ja - the column indices

7358:     Output Parameters:
7359: .   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned

7361:     Note:
7362:     This routine zeros out n, ia, and ja. This is to prevent accidental
7363:     use of the array after it has been restored. If you pass NULL, it will
7364:     not zero the pointers.  Use of ia or ja after MatRestoreRowIJ() is invalid.

7366:     Level: developer

7368: .seealso: MatGetRowIJ(), MatRestoreColumnIJ()
7369: @*/
7370: PetscErrorCode MatRestoreRowIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7371: {

7380:   MatCheckPreallocated(mat,1);

7382:   if (!mat->ops->restorerowij) *done = PETSC_FALSE;
7383:   else {
7384:     *done = PETSC_TRUE;
7385:     (*mat->ops->restorerowij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7386:     if (n)  *n = 0;
7387:     if (ia) *ia = NULL;
7388:     if (ja) *ja = NULL;
7389:   }
7390:   return(0);
7391: }

7395: /*@C
7396:     MatRestoreColumnIJ - Call after you are completed with the ia,ja indices obtained with
7397:     MatGetColumnIJ().

7399:     Collective on Mat

7401:     Input Parameters:
7402: +   mat - the matrix
7403: .   shift - 0 or 1 indicating we want the indices starting at 0 or 1
7404: .   symmetric - PETSC_TRUE or PETSC_FALSE indicating the matrix data structure should be
7405:                 symmetrized
7406: -   inodecompressed - PETSC_TRUE or PETSC_FALSE indicating if the nonzero structure of the
7407:                  inodes or the nonzero elements is wanted. For BAIJ matrices the compressed version is
7408:                  always used.

7410:     Output Parameters:
7411: +   n - size of (possibly compressed) matrix
7412: .   ia - the column pointers
7413: .   ja - the row indices
7414: -   done - PETSC_TRUE or PETSC_FALSE indicating whether the values have been returned

7416:     Level: developer

7418: .seealso: MatGetColumnIJ(), MatRestoreRowIJ()
7419: @*/
7420: PetscErrorCode MatRestoreColumnIJ(Mat mat,PetscInt shift,PetscBool symmetric,PetscBool inodecompressed,PetscInt *n,const PetscInt *ia[],const PetscInt *ja[],PetscBool  *done)
7421: {

7430:   MatCheckPreallocated(mat,1);

7432:   if (!mat->ops->restorecolumnij) *done = PETSC_FALSE;
7433:   else {
7434:     *done = PETSC_TRUE;
7435:     (*mat->ops->restorecolumnij)(mat,shift,symmetric,inodecompressed,n,ia,ja,done);
7436:     if (n)  *n = 0;
7437:     if (ia) *ia = NULL;
7438:     if (ja) *ja = NULL;
7439:   }
7440:   return(0);
7441: }

7445: /*@C
7446:     MatColoringPatch - Used inside matrix coloring routines that
7447:     use MatGetRowIJ() and/or MatGetColumnIJ().

7449:     Collective on Mat

7451:     Input Parameters:
7452: +   mat - the matrix
7453: .   ncolors - max color value
7454: .   n   - number of entries in colorarray
7455: -   colorarray - array indicating color for each column

7457:     Output Parameters:
7458: .   iscoloring - coloring generated using colorarray information

7460:     Level: developer

7462: .seealso: MatGetRowIJ(), MatGetColumnIJ()

7464: @*/
7465: PetscErrorCode MatColoringPatch(Mat mat,PetscInt ncolors,PetscInt n,ISColoringValue colorarray[],ISColoring *iscoloring)
7466: {

7474:   MatCheckPreallocated(mat,1);

7476:   if (!mat->ops->coloringpatch) {
7477:     ISColoringCreate(PetscObjectComm((PetscObject)mat),ncolors,n,colorarray,PETSC_OWN_POINTER,iscoloring);
7478:   } else {
7479:     (*mat->ops->coloringpatch)(mat,ncolors,n,colorarray,iscoloring);
7480:   }
7481:   return(0);
7482: }


7487: /*@
7488:    MatSetUnfactored - Resets a factored matrix to be treated as unfactored.

7490:    Logically Collective on Mat

7492:    Input Parameter:
7493: .  mat - the factored matrix to be reset

7495:    Notes:
7496:    This routine should be used only with factored matrices formed by in-place
7497:    factorization via ILU(0) (or by in-place LU factorization for the MATSEQDENSE
7498:    format).  This option can save memory, for example, when solving nonlinear
7499:    systems with a matrix-free Newton-Krylov method and a matrix-based, in-place
7500:    ILU(0) preconditioner.

7502:    Note that one can specify in-place ILU(0) factorization by calling
7503: .vb
7504:      PCSetType(pc,PCILU);
7505:      PCFactorSetUseInPlace(pc);
7506: .ve
7507:    or by using the options -pc_type ilu -pc_factor_in_place

7509:    In-place factorization ILU(0) can also be used as a local
7510:    solver for the blocks within the block Jacobi or additive Schwarz
7511:    methods (runtime option: -sub_pc_factor_in_place).  See Users-Manual: ch_pc
7512:    for details on setting local solver options.

7514:    Most users should employ the simplified KSP interface for linear solvers
7515:    instead of working directly with matrix algebra routines such as this.
7516:    See, e.g., KSPCreate().

7518:    Level: developer

7520: .seealso: PCFactorSetUseInPlace(), PCFactorGetUseInPlace()

7522:    Concepts: matrices^unfactored

7524: @*/
7525: PetscErrorCode MatSetUnfactored(Mat mat)
7526: {

7532:   MatCheckPreallocated(mat,1);
7533:   mat->factortype = MAT_FACTOR_NONE;
7534:   if (!mat->ops->setunfactored) return(0);
7535:   (*mat->ops->setunfactored)(mat);
7536:   return(0);
7537: }

7539: /*MC
7540:     MatDenseGetArrayF90 - Accesses a matrix array from Fortran90.

7542:     Synopsis:
7543:     MatDenseGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7545:     Not collective

7547:     Input Parameter:
7548: .   x - matrix

7550:     Output Parameters:
7551: +   xx_v - the Fortran90 pointer to the array
7552: -   ierr - error code

7554:     Example of Usage:
7555: .vb
7556:       PetscScalar, pointer :: xx_v(:,:)
7557:       ....
7558:       call MatDenseGetArrayF90(x,xx_v,ierr)
7559:       a = xx_v(3,1)
7560:       call MatDenseRestoreArrayF90(x,xx_v,ierr)
7561: .ve

7563:     Level: advanced

7565: .seealso:  MatDenseRestoreArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJGetArrayF90()

7567:     Concepts: matrices^accessing array

7569: M*/

7571: /*MC
7572:     MatDenseRestoreArrayF90 - Restores a matrix array that has been
7573:     accessed with MatDenseGetArrayF90().

7575:     Synopsis:
7576:     MatDenseRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:,:)},integer ierr)

7578:     Not collective

7580:     Input Parameters:
7581: +   x - matrix
7582: -   xx_v - the Fortran90 pointer to the array

7584:     Output Parameter:
7585: .   ierr - error code

7587:     Example of Usage:
7588: .vb
7589:        PetscScalar, pointer :: xx_v(:,:)
7590:        ....
7591:        call MatDenseGetArrayF90(x,xx_v,ierr)
7592:        a = xx_v(3,1)
7593:        call MatDenseRestoreArrayF90(x,xx_v,ierr)
7594: .ve

7596:     Level: advanced

7598: .seealso:  MatDenseGetArrayF90(), MatDenseGetArray(), MatDenseRestoreArray(), MatSeqAIJRestoreArrayF90()

7600: M*/


7603: /*MC
7604:     MatSeqAIJGetArrayF90 - Accesses a matrix array from Fortran90.

7606:     Synopsis:
7607:     MatSeqAIJGetArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7609:     Not collective

7611:     Input Parameter:
7612: .   x - matrix

7614:     Output Parameters:
7615: +   xx_v - the Fortran90 pointer to the array
7616: -   ierr - error code

7618:     Example of Usage:
7619: .vb
7620:       PetscScalar, pointer :: xx_v(:)
7621:       ....
7622:       call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7623:       a = xx_v(3)
7624:       call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7625: .ve

7627:     Level: advanced

7629: .seealso:  MatSeqAIJRestoreArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseGetArrayF90()

7631:     Concepts: matrices^accessing array

7633: M*/

7635: /*MC
7636:     MatSeqAIJRestoreArrayF90 - Restores a matrix array that has been
7637:     accessed with MatSeqAIJGetArrayF90().

7639:     Synopsis:
7640:     MatSeqAIJRestoreArrayF90(Mat x,{Scalar, pointer :: xx_v(:)},integer ierr)

7642:     Not collective

7644:     Input Parameters:
7645: +   x - matrix
7646: -   xx_v - the Fortran90 pointer to the array

7648:     Output Parameter:
7649: .   ierr - error code

7651:     Example of Usage:
7652: .vb
7653:        PetscScalar, pointer :: xx_v(:)
7654:        ....
7655:        call MatSeqAIJGetArrayF90(x,xx_v,ierr)
7656:        a = xx_v(3)
7657:        call MatSeqAIJRestoreArrayF90(x,xx_v,ierr)
7658: .ve

7660:     Level: advanced

7662: .seealso:  MatSeqAIJGetArrayF90(), MatSeqAIJGetArray(), MatSeqAIJRestoreArray(), MatDenseRestoreArrayF90()

7664: M*/


7669: /*@
7670:     MatGetSubMatrix - Gets a single submatrix on the same number of processors
7671:                       as the original matrix.

7673:     Collective on Mat

7675:     Input Parameters:
7676: +   mat - the original matrix
7677: .   isrow - parallel IS containing the rows this processor should obtain
7678: .   iscol - parallel IS containing all columns you wish to keep. Each process should list the columns that will be in its "diagonal part" in the new matrix.
7679: -   cll - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

7681:     Output Parameter:
7682: .   newmat - the new submatrix, of the same type as the old

7684:     Level: advanced

7686:     Notes:
7687:     The submatrix will be able to be multiplied with vectors using the same layout as iscol.

7689:     Some matrix types place restrictions on the row and column indices, such
7690:     as that they be sorted or that they be equal to each other.

7692:     The index sets may not have duplicate entries.

7694:       The first time this is called you should use a cll of MAT_INITIAL_MATRIX,
7695:    the MatGetSubMatrix() routine will create the newmat for you. Any additional calls
7696:    to this routine with a mat of the same nonzero structure and with a call of MAT_REUSE_MATRIX
7697:    will reuse the matrix generated the first time.  You should call MatDestroy() on newmat when
7698:    you are finished using it.

7700:     The communicator of the newly obtained matrix is ALWAYS the same as the communicator of
7701:     the input matrix.

7703:     If iscol is NULL then all columns are obtained (not supported in Fortran).

7705:    Example usage:
7706:    Consider the following 8x8 matrix with 34 non-zero values, that is
7707:    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
7708:    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
7709:    as follows:

7711: .vb
7712:             1  2  0  |  0  3  0  |  0  4
7713:     Proc0   0  5  6  |  7  0  0  |  8  0
7714:             9  0 10  | 11  0  0  | 12  0
7715:     -------------------------------------
7716:            13  0 14  | 15 16 17  |  0  0
7717:     Proc1   0 18  0  | 19 20 21  |  0  0
7718:             0  0  0  | 22 23  0  | 24  0
7719:     -------------------------------------
7720:     Proc2  25 26 27  |  0  0 28  | 29  0
7721:            30  0  0  | 31 32 33  |  0 34
7722: .ve

7724:     Suppose isrow = [0 1 | 4 | 6 7] and iscol = [1 2 | 3 4 5 | 6].  The resulting submatrix is

7726: .vb
7727:             2  0  |  0  3  0  |  0
7728:     Proc0   5  6  |  7  0  0  |  8
7729:     -------------------------------
7730:     Proc1  18  0  | 19 20 21  |  0
7731:     -------------------------------
7732:     Proc2  26 27  |  0  0 28  | 29
7733:             0  0  | 31 32 33  |  0
7734: .ve


7737:     Concepts: matrices^submatrices

7739: .seealso: MatGetSubMatrices()
7740: @*/
7741: PetscErrorCode MatGetSubMatrix(Mat mat,IS isrow,IS iscol,MatReuse cll,Mat *newmat)
7742: {
7744:   PetscMPIInt    size;
7745:   Mat            *local;
7746:   IS             iscoltmp;

7755:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
7756:   if (cll == MAT_IGNORE_MATRIX) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Cannot use MAT_IGNORE_MATRIX");

7758:   MatCheckPreallocated(mat,1);
7759:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);

7761:   if (!iscol || isrow == iscol) {
7762:     PetscBool   stride;
7763:     PetscMPIInt grabentirematrix = 0,grab;
7764:     PetscObjectTypeCompare((PetscObject)isrow,ISSTRIDE,&stride);
7765:     if (stride) {
7766:       PetscInt first,step,n,rstart,rend;
7767:       ISStrideGetInfo(isrow,&first,&step);
7768:       if (step == 1) {
7769:         MatGetOwnershipRange(mat,&rstart,&rend);
7770:         if (rstart == first) {
7771:           ISGetLocalSize(isrow,&n);
7772:           if (n == rend-rstart) {
7773:             grabentirematrix = 1;
7774:           }
7775:         }
7776:       }
7777:     }
7778:     MPIU_Allreduce(&grabentirematrix,&grab,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));
7779:     if (grab) {
7780:       PetscInfo(mat,"Getting entire matrix as submatrix\n");
7781:       if (cll == MAT_INITIAL_MATRIX) {
7782:         *newmat = mat;
7783:         PetscObjectReference((PetscObject)mat);
7784:       }
7785:       return(0);
7786:     }
7787:   }

7789:   if (!iscol) {
7790:     ISCreateStride(PetscObjectComm((PetscObject)mat),mat->cmap->n,mat->cmap->rstart,1,&iscoltmp);
7791:   } else {
7792:     iscoltmp = iscol;
7793:   }

7795:   /* if original matrix is on just one processor then use submatrix generated */
7796:   if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1 && cll == MAT_REUSE_MATRIX) {
7797:     MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_REUSE_MATRIX,&newmat);
7798:     if (!iscol) {ISDestroy(&iscoltmp);}
7799:     return(0);
7800:   } else if (mat->ops->getsubmatrices && !mat->ops->getsubmatrix && size == 1) {
7801:     MatGetSubMatrices(mat,1,&isrow,&iscoltmp,MAT_INITIAL_MATRIX,&local);
7802:     *newmat = *local;
7803:     PetscFree(local);
7804:     if (!iscol) {ISDestroy(&iscoltmp);}
7805:     return(0);
7806:   } else if (!mat->ops->getsubmatrix) {
7807:     /* Create a new matrix type that implements the operation using the full matrix */
7808:     PetscLogEventBegin(MAT_GetSubMatrix,mat,0,0,0);
7809:     switch (cll) {
7810:     case MAT_INITIAL_MATRIX:
7811:       MatCreateSubMatrix(mat,isrow,iscoltmp,newmat);
7812:       break;
7813:     case MAT_REUSE_MATRIX:
7814:       MatSubMatrixUpdate(*newmat,mat,isrow,iscoltmp);
7815:       break;
7816:     default: SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"Invalid MatReuse, must be either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX");
7817:     }
7818:     PetscLogEventEnd(MAT_GetSubMatrix,mat,0,0,0);
7819:     if (!iscol) {ISDestroy(&iscoltmp);}
7820:     return(0);
7821:   }

7823:   if (!mat->ops->getsubmatrix) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
7824:   PetscLogEventBegin(MAT_GetSubMatrix,mat,0,0,0);
7825:   (*mat->ops->getsubmatrix)(mat,isrow,iscoltmp,cll,newmat);
7826:   PetscLogEventEnd(MAT_GetSubMatrix,mat,0,0,0);
7827:   if (!iscol) {ISDestroy(&iscoltmp);}
7828:   if (*newmat && cll == MAT_INITIAL_MATRIX) {PetscObjectStateIncrease((PetscObject)*newmat);}
7829:   return(0);
7830: }
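
/*
   Editorial example (not part of the PETSc source): a minimal sketch of MatGetSubMatrix()
   extracting every other locally owned row and all columns (iscol = NULL, not available
   from Fortran). The helper name is hypothetical.
*/
static PetscErrorCode ExtractEveryOtherLocalRow(Mat A,Mat *B)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend;
  IS             isrow;

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  ierr = ISCreateStride(PetscObjectComm((PetscObject)A),(rend-rstart+1)/2,rstart,2,&isrow);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,isrow,NULL,MAT_INITIAL_MATRIX,B);CHKERRQ(ierr);
  ierr = ISDestroy(&isrow);CHKERRQ(ierr);
  return 0;
}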

7834: /*@
7835:    MatStashSetInitialSize - sets the sizes of the matrix stash, that is
7836:    used during the assembly process to store values that belong to
7837:    other processors.

7839:    Not Collective

7841:    Input Parameters:
7842: +  mat   - the matrix
7843: .  size  - the initial size of the stash.
7844: -  bsize - the initial size of the block-stash(if used).

7846:    Options Database Keys:
7847: +   -matstash_initial_size <size> or <size0,size1,...sizep-1>
7848: -   -matstash_block_initial_size <bsize>  or <bsize0,bsize1,...bsizep-1>

7850:    Level: intermediate

7852:    Notes:
7853:      The block-stash is used for values set with MatSetValuesBlocked() while
7854:      the stash is used for values set with MatSetValues()

7856:      Run with the option -info and look for output of the form
7857:      MatAssemblyBegin_MPIXXX:Stash has MM entries, uses nn mallocs.
7858:      to determine the appropriate value, MM, to use for size and
7859:      MatAssemblyBegin_MPIXXX:Block-Stash has BMM entries, uses nn mallocs.
7860:      to determine the value, BMM to use for bsize

7862:    Concepts: stash^setting matrix size
7863:    Concepts: matrices^stash

7865: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashGetInfo()

7867: @*/
7868: PetscErrorCode MatStashSetInitialSize(Mat mat,PetscInt size, PetscInt bsize)
7869: {

7875:   MatStashSetInitialSize_Private(&mat->stash,size);
7876:   MatStashSetInitialSize_Private(&mat->bstash,bsize);
7877:   return(0);
7878: }

7882: /*@
7883:    MatInterpolateAdd - w = y + A*x or A'*x depending on the shape of
7884:      the matrix

7886:    Neighbor-wise Collective on Mat

7888:    Input Parameters:
7889: +  mat   - the matrix
7890: .  x,y - the vectors
7891: -  w - where the result is stored

7893:    Level: intermediate

7895:    Notes:
7896:     w may be the same vector as y.

7898:     This allows one to use either the restriction or interpolation (its transpose)
7899:     matrix to do the interpolation

7901:     Concepts: interpolation

7903: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

7905: @*/
7906: PetscErrorCode MatInterpolateAdd(Mat A,Vec x,Vec y,Vec w)
7907: {
7909:   PetscInt       M,N,Ny;

7917:   MatCheckPreallocated(A,1);
7918:   MatGetSize(A,&M,&N);
7919:   VecGetSize(y,&Ny);
7920:   if (M == Ny) {
7921:     MatMultAdd(A,x,y,w);
7922:   } else {
7923:     MatMultTransposeAdd(A,x,y,w);
7924:   }
7925:   return(0);
7926: }

7930: /*@
7931:    MatInterpolate - y = A*x or A'*x depending on the shape of
7932:      the matrix

7934:    Neighbor-wise Collective on Mat

7936:    Input Parameters:
7937: +  mat   - the matrix
7938: -  x,y - the vectors

7940:    Level: intermediate

7942:    Notes:
7943:     This allows one to use either the restriction or interpolation (its transpose)
7944:     matrix to do the interpolation

7946:    Concepts: matrices^interpolation

7948: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatRestrict()

7950: @*/
7951: PetscErrorCode MatInterpolate(Mat A,Vec x,Vec y)
7952: {
7954:   PetscInt       M,N,Ny;

7961:   MatCheckPreallocated(A,1);
7962:   MatGetSize(A,&M,&N);
7963:   VecGetSize(y,&Ny);
7964:   if (M == Ny) {
7965:     MatMult(A,x,y);
7966:   } else {
7967:     MatMultTranspose(A,x,y);
7968:   }
7969:   return(0);
7970: }

7974: /*@
7975:    MatRestrict - y = A*x or A'*x

7977:    Neighbor-wise Collective on Mat

7979:    Input Parameters:
7980: +  mat   - the matrix
7981: -  x,y - the vectors

7983:    Level: intermediate

7985:    Notes:
7986:     This allows one to use either the restriction or interpolation (its transpose)
7987:     matrix to do the restriction

7989:    Concepts: matrices^restriction

7991: .seealso: MatMultAdd(), MatMultTransposeAdd(), MatInterpolate()

7993: @*/
7994: PetscErrorCode MatRestrict(Mat A,Vec x,Vec y)
7995: {
7997:   PetscInt       M,N,Ny;

8004:   MatCheckPreallocated(A,1);

8006:   MatGetSize(A,&M,&N);
8007:   VecGetSize(y,&Ny);
8008:   if (M == Ny) {
8009:     MatMult(A,x,y);
8010:   } else {
8011:     MatMultTranspose(A,x,y);
8012:   }
8013:   return(0);
8014: }

8018: /*@
8019:    MatGetNullSpace - retrieves the null space attached to a matrix.

8021:    Logically Collective on Mat and MatNullSpace

8023:    Input Parameters:
8024: +  mat - the matrix
8025: -  nullsp - the null space object

8027:    Level: developer

8029:    Concepts: null space^attaching to matrix

8031: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetNullSpace()
8032: @*/
8033: PetscErrorCode MatGetNullSpace(Mat mat, MatNullSpace *nullsp)
8034: {
8039:   *nullsp = mat->nullsp;
8040:   return(0);
8041: }

8045: /*@
8046:    MatSetNullSpace - attaches a null space to a matrix.

8048:    Logically Collective on Mat and MatNullSpace

8050:    Input Parameters:
8051: +  mat - the matrix
8052: -  nullsp - the null space object

8054:    Level: advanced

8056:    Notes:
8057:       This null space is used by the linear solvers. Overwrites any previous null space that may have been attached

8059:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) you also likely should
8060:       call MatSetTransposeNullSpace(). This allows the linear system to be solved in a least squares sense.

8062:       You can remove the null space by calling this routine with a nullsp of NULL


8065:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8066:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8067:    Similarly, R^m is the direct sum of n(A^T) and R(A).  Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8068:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8069:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).

8071:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8073:    Concepts: null space^attaching to matrix

8075: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetTransposeNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8076: @*/
8077: PetscErrorCode MatSetNullSpace(Mat mat,MatNullSpace nullsp)
8078: {

8085:   MatCheckPreallocated(mat,1);
8086:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8087:   MatNullSpaceDestroy(&mat->nullsp);
8088:   mat->nullsp = nullsp;
8089:   return(0);
8090: }
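
/*
   Editorial example (not part of the PETSc source): a minimal sketch of attaching the
   constant null space (as arises, e.g., for a pure Neumann Laplacian) so that Krylov
   solvers can project it out. The helper name is hypothetical.
*/
static PetscErrorCode AttachConstantNullSpace(Mat A)
{
  PetscErrorCode ierr;
  MatNullSpace   nullsp;

  ierr = MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,0,NULL,&nullsp);CHKERRQ(ierr);
  ierr = MatSetNullSpace(A,nullsp);CHKERRQ(ierr);
  ierr = MatNullSpaceDestroy(&nullsp);CHKERRQ(ierr);   /* the matrix keeps its own reference */
  return 0;
}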

8094: /*@
8095:    MatGetTransposeNullSpace - retrieves the null space of the transpose of a matrix.

8097:    Logically Collective on Mat and MatNullSpace

8099:    Input Parameters:
8100: +  mat - the matrix
8101: -  nullsp - the null space object

8103:    Level: developer

8105:    Concepts: null space^attaching to matrix

8107: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatSetTransposeNullSpace(), MatSetNullSpace(), MatGetNullSpace()
8108: @*/
8109: PetscErrorCode MatGetTransposeNullSpace(Mat mat, MatNullSpace *nullsp)
8110: {
8115:   *nullsp = mat->transnullsp;
8116:   return(0);
8117: }

8121: /*@
8122:    MatSetTransposeNullSpace - attaches a null space to a matrix.

8124:    Logically Collective on Mat and MatNullSpace

8126:    Input Parameters:
8127: +  mat - the matrix
8128: -  nullsp - the null space object

8130:    Level: advanced

8132:    Notes:
8133:       For inconsistent singular systems (linear systems where the right hand side is not in the range of the operator) this allows the linear system to be solved in a least squares sense.
8134:       You must also call MatSetNullSpace().


8137:       The fundamental theorem of linear algebra (Gilbert Strang, Introduction to Applied Mathematics, page 72) states that
8138:    for a matrix A mapping R^n to R^m (m rows, n columns), R^n is the direct sum of the null space of A, n(A), and the range of A^T, R(A^T).
8139:    Similarly, R^m is the direct sum of n(A^T) and R(A).  Hence the linear system A x = b has a solution only if b is in R(A) (or, equivalently, b is orthogonal to
8140:    n(A^T)), and if x is a solution then x + alpha n(A) is a solution for any alpha. The minimum norm solution is orthogonal to n(A). For problems without a solution,
8141:    the solution that minimizes the norm of the residual (the least squares solution) can be obtained by solving A x = \hat{b}, where \hat{b} is b orthogonalized against n(A^T).

8143:       Krylov solvers can produce the minimal norm solution to the least squares problem by utilizing MatNullSpaceRemove().

8145:    Concepts: null space^attaching to matrix

8147: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNearNullSpace(), MatGetNullSpace(), MatSetNullSpace(), MatGetTransposeNullSpace(), MatNullSpaceRemove()
8148: @*/
8149: PetscErrorCode MatSetTransposeNullSpace(Mat mat,MatNullSpace nullsp)
8150: {

8157:   MatCheckPreallocated(mat,1);
8158:   PetscObjectReference((PetscObject)nullsp);
8159:   MatNullSpaceDestroy(&mat->transnullsp);
8160:   mat->transnullsp = nullsp;
8161:   return(0);
8162: }

8166: /*@
8167:    MatSetNearNullSpace - attaches a near null space to a matrix, which is often the null space (rigid body modes) of the operator without boundary conditions.
8168:         This near null space will be used to provide near null space vectors to a multigrid preconditioner built from this matrix.

8170:    Logically Collective on Mat and MatNullSpace

8172:    Input Parameters:
8173: +  mat - the matrix
8174: -  nullsp - the null space object

8176:    Level: advanced

8178:    Notes:
8179:       Overwrites any previous near null space that may have been attached

8181:       You can remove the near null space by calling this routine with a nullsp of NULL
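
   Example of Usage (a minimal sketch for an elasticity problem; coords is a hypothetical name for a blocked Vec of nodal coordinates created elsewhere):
.vb
     MatNullSpace nearsp;
     MatNullSpaceCreateRigidBody(coords,&nearsp);  /* rigid body modes built from the coordinates */
     MatSetNearNullSpace(A,nearsp);
     MatNullSpaceDestroy(&nearsp);
.ve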

8183:    Concepts: null space^attaching to matrix

8185: .seealso: MatCreate(), MatNullSpaceCreate(), MatSetNullSpace(), MatNullSpaceCreateRigidBody(), MatGetNearNullSpace()
8186: @*/
8187: PetscErrorCode MatSetNearNullSpace(Mat mat,MatNullSpace nullsp)
8188: {

8195:   MatCheckPreallocated(mat,1);
8196:   if (nullsp) {PetscObjectReference((PetscObject)nullsp);}
8197:   MatNullSpaceDestroy(&mat->nearnullsp);
8198:   mat->nearnullsp = nullsp;
8199:   return(0);
8200: }

8204: /*@
8205:    MatGetNearNullSpace - Gets the near null space attached with MatSetNearNullSpace()

8207:    Not Collective

8209:    Input Parameters:
8210: .  mat - the matrix

8212:    Output Parameters:
8213: .  nullsp - the null space object, NULL if not set

8215:    Level: developer

8217:    Concepts: null space^attaching to matrix

8219: .seealso: MatSetNearNullSpace(), MatGetNullSpace(), MatNullSpaceCreate()
8220: @*/
8221: PetscErrorCode MatGetNearNullSpace(Mat mat,MatNullSpace *nullsp)
8222: {
8227:   MatCheckPreallocated(mat,1);
8228:   *nullsp = mat->nearnullsp;
8229:   return(0);
8230: }

8234: /*@C
8235:    MatICCFactor - Performs in-place incomplete Cholesky factorization of matrix.

8237:    Collective on Mat

8239:    Input Parameters:
8240: +  mat - the matrix
8241: .  row - row/column permutation
8242: -  info - information about the desired factorization process (levels of fill, expected fill, etc.), see MatFactorInfo

8245:    Notes:
8246:    Probably really in-place only when level of fill is zero, otherwise allocates
8247:    new space to store factored matrix and deletes previous memory.

8249:    Most users should employ the simplified KSP interface for linear solvers
8250:    instead of working directly with matrix algebra routines such as this.
8251:    See, e.g., KSPCreate().

8253:    Level: developer

8255:    Concepts: matrices^incomplete Cholesky factorization
8256:    Concepts: Cholesky factorization

8258: .seealso: MatICCFactorSymbolic(), MatLUFactorNumeric(), MatCholeskyFactor()

8260:     Developer Note: fortran interface is not autogenerated as the f90
8261:     interface definition cannot be generated correctly [due to MatFactorInfo]

8263: @*/
8264: PetscErrorCode MatICCFactor(Mat mat,IS row,const MatFactorInfo *info)
8265: {

8273:   if (mat->rmap->N != mat->cmap->N) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONG,"matrix must be square");
8274:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
8275:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
8276:   if (!mat->ops->iccfactor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8277:   MatCheckPreallocated(mat,1);
8278:   (*mat->ops->iccfactor)(mat,row,info);
8279:   PetscObjectStateIncrease((PetscObject)mat);
8280:   return(0);
8281: }

8285: /*@
8286:    MatSetValuesAdifor - Sets values computed with automatic differentiation into a matrix.

8288:    Not Collective

8290:    Input Parameters:
8291: +  mat - the matrix
8292: .  nl - leading dimension of v
8293: -  v - the values computed with ADIFOR

8295:    Level: developer

8297:    Notes:
8298:      Must call MatSetColoring() before using this routine. Also this matrix must already
8299:      have its nonzero pattern determined.

8301: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValuesBlocked(), MatSetValuesLocal(),
8302:           MatSetValues(), MatSetColoring()
8303: @*/
8304: PetscErrorCode MatSetValuesAdifor(Mat mat,PetscInt nl,void *v)
8305: {


8313:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8314:   PetscLogEventBegin(MAT_SetValues,mat,0,0,0);
8315:   if (!mat->ops->setvaluesadifor) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8316:   (*mat->ops->setvaluesadifor)(mat,nl,v);
8317:   PetscLogEventEnd(MAT_SetValues,mat,0,0,0);
8318:   PetscObjectStateIncrease((PetscObject)mat);
8319:   return(0);
8320: }

8324: /*@
8325:    MatDiagonalScaleLocal - Scales columns of a matrix given the scaling values including the
8326:          ghosted ones.

8328:    Not Collective

8330:    Input Parameters:
8331: +  mat - the matrix
8332: -  diag - the diagonal values, including ghost ones

8334:    Level: developer

8336:    Notes: Works only for MPIAIJ and MPIBAIJ matrices

8338: .seealso: MatDiagonalScale()
8339: @*/
8340: PetscErrorCode MatDiagonalScaleLocal(Mat mat,Vec diag)
8341: {
8343:   PetscMPIInt    size;


8350:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must be already assembled");
8351:   PetscLogEventBegin(MAT_Scale,mat,0,0,0);
8352:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
8353:   if (size == 1) {
8354:     PetscInt n,m;
8355:     VecGetSize(diag,&n);
8356:     MatGetSize(mat,0,&m);
8357:     if (m == n) {
8358:       MatDiagonalScale(mat,0,diag);
8359:     } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only supported for sequential matrices when no ghost points/periodic conditions");
8360:   } else {
8361:     PetscUseMethod(mat,"MatDiagonalScaleLocal_C",(Mat,Vec),(mat,diag));
8362:   }
8363:   PetscLogEventEnd(MAT_Scale,mat,0,0,0);
8364:   PetscObjectStateIncrease((PetscObject)mat);
8365:   return(0);
8366: }

8370: /*@
8371:    MatGetInertia - Gets the inertia from a factored matrix

8373:    Collective on Mat

8375:    Input Parameter:
8376: .  mat - the matrix

8378:    Output Parameters:
8379: +   nneg - number of negative eigenvalues
8380: .   nzero - number of zero eigenvalues
8381: -   npos - number of positive eigenvalues

8383:    Level: advanced

8385:    Notes: Matrix must have been factored by MatCholeskyFactor()
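
   Example of Usage (a sketch only, assuming a Cholesky factorization through an external package such as MUMPS; A is assumed symmetric, created, and assembled elsewhere, and error checking is omitted):
.vb
     Mat           F;
     MatFactorInfo info;
     IS            perm,cperm;
     PetscInt      nneg,nzero,npos;
     MatFactorInfoInitialize(&info);
     MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_CHOLESKY,&F);
     MatGetOrdering(A,MATORDERINGNATURAL,&perm,&cperm);
     MatCholeskyFactorSymbolic(F,A,perm,&info);
     MatCholeskyFactorNumeric(F,A,&info);
     MatGetInertia(F,&nneg,&nzero,&npos);
     ISDestroy(&perm);ISDestroy(&cperm);
     MatDestroy(&F);
.ve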


8388: @*/
8389: PetscErrorCode MatGetInertia(Mat mat,PetscInt *nneg,PetscInt *nzero,PetscInt *npos)
8390: {

8396:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8397:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Numeric factor mat is not assembled");
8398:   if (!mat->ops->getinertia) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8399:   (*mat->ops->getinertia)(mat,nneg,nzero,npos);
8400:   return(0);
8401: }

8403: /* ----------------------------------------------------------------*/
8406: /*@C
8407:    MatSolves - Solves A x = b, given a factored matrix, for a collection of vectors

8409:    Neighbor-wise Collective on Mat and Vecs

8411:    Input Parameters:
8412: +  mat - the factored matrix
8413: -  b - the right-hand-side vectors

8415:    Output Parameter:
8416: .  x - the result vectors

8418:    Notes:
8419:    The vectors b and x cannot be the same.  I.e., one cannot
8420:    call MatSolves(A,x,x).

8423:    Most users should employ the simplified KSP interface for linear solvers
8424:    instead of working directly with matrix algebra routines such as this.
8425:    See, e.g., KSPCreate().

8427:    Level: developer

8429:    Concepts: matrices^triangular solves

8431: .seealso: MatSolveAdd(), MatSolveTranspose(), MatSolveTransposeAdd(), MatSolve()
8432: @*/
8433: PetscErrorCode MatSolves(Mat mat,Vecs b,Vecs x)
8434: {

8440:   if (x == b) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_IDN,"x and b must be different vectors");
8441:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Unfactored matrix");
8442:   if (!mat->rmap->N && !mat->cmap->N) return(0);

8444:   if (!mat->ops->solves) SETERRQ1(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)mat)->type_name);
8445:   MatCheckPreallocated(mat,1);
8446:   PetscLogEventBegin(MAT_Solves,mat,0,0,0);
8447:   (*mat->ops->solves)(mat,b,x);
8448:   PetscLogEventEnd(MAT_Solves,mat,0,0,0);
8449:   return(0);
8450: }

8454: /*@
8455:    MatIsSymmetric - Test whether a matrix is symmetric

8457:    Collective on Mat

8459:    Input Parameters:
8460: +  A - the matrix to test
8461: -  tol - difference between a value and its transposed counterpart smaller than this amount counts as equal (use 0.0 for an exact test)

8463:    Output Parameter:
8464: .  flg - the result

8466:    Notes: For real numbers MatIsSymmetric() and MatIsHermitian() return identical results
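
   Example of Usage (a minimal sketch; A is assumed created and assembled elsewhere):
.vb
     PetscBool issym;
     MatIsSymmetric(A,1.e-12,&issym);   /* allow round-off sized differences */
     if (issym) {
       MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);
     }
.ve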

8468:    Level: intermediate

8470:    Concepts: matrix^symmetry

8472: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetricKnown()
8473: @*/
8474: PetscErrorCode MatIsSymmetric(Mat A,PetscReal tol,PetscBool  *flg)
8475: {


8482:   if (!A->symmetric_set) {
8483:     if (!A->ops->issymmetric) {
8484:       MatType mattype;
8485:       MatGetType(A,&mattype);
8486:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8487:     }
8488:     (*A->ops->issymmetric)(A,tol,flg);
8489:     if (!tol) {
8490:       A->symmetric_set = PETSC_TRUE;
8491:       A->symmetric     = *flg;
8492:       if (A->symmetric) {
8493:         A->structurally_symmetric_set = PETSC_TRUE;
8494:         A->structurally_symmetric     = PETSC_TRUE;
8495:       }
8496:     }
8497:   } else if (A->symmetric) {
8498:     *flg = PETSC_TRUE;
8499:   } else if (!tol) {
8500:     *flg = PETSC_FALSE;
8501:   } else {
8502:     if (!A->ops->issymmetric) {
8503:       MatType mattype;
8504:       MatGetType(A,&mattype);
8505:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for symmetric",mattype);
8506:     }
8507:     (*A->ops->issymmetric)(A,tol,flg);
8508:   }
8509:   return(0);
8510: }

8514: /*@
8515:    MatIsHermitian - Test whether a matrix is Hermitian

8517:    Collective on Mat

8519:    Input Parameters:
8520: +  A - the matrix to test
8521: -  tol - difference between a value and its conjugate transposed counterpart smaller than this amount counts as equal (use 0.0 for an exact test)

8523:    Output Parameter:
8524: .  flg - the result

8526:    Level: intermediate

8528:    Concepts: matrix^symmetry

8530: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(),
8531:           MatIsSymmetricKnown(), MatIsSymmetric()
8532: @*/
8533: PetscErrorCode MatIsHermitian(Mat A,PetscReal tol,PetscBool  *flg)
8534: {


8541:   if (!A->hermitian_set) {
8542:     if (!A->ops->ishermitian) {
8543:       MatType mattype;
8544:       MatGetType(A,&mattype);
8545:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8546:     }
8547:     (*A->ops->ishermitian)(A,tol,flg);
8548:     if (!tol) {
8549:       A->hermitian_set = PETSC_TRUE;
8550:       A->hermitian     = *flg;
8551:       if (A->hermitian) {
8552:         A->structurally_symmetric_set = PETSC_TRUE;
8553:         A->structurally_symmetric     = PETSC_TRUE;
8554:       }
8555:     }
8556:   } else if (A->hermitian) {
8557:     *flg = PETSC_TRUE;
8558:   } else if (!tol) {
8559:     *flg = PETSC_FALSE;
8560:   } else {
8561:     if (!A->ops->ishermitian) {
8562:       MatType mattype;
8563:       MatGetType(A,&mattype);
8564:       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix of type <%s> does not support checking for hermitian",mattype);
8565:     }
8566:     (*A->ops->ishermitian)(A,tol,flg);
8567:   }
8568:   return(0);
8569: }

8573: /*@
8574:    MatIsSymmetricKnown - Checks the flag on the matrix to see if it is symmetric.

8576:    Not Collective

8578:    Input Parameter:
8579: .  A - the matrix to check

8581:    Output Parameters:
8582: +  set - if the symmetric flag is set (this tells you if the next flag is valid)
8583: -  flg - the result

8585:    Level: advanced

8587:    Concepts: matrix^symmetry

8589:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsSymmetric()
8590:          if you want it explicitly checked
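
   Example of Usage (a minimal sketch; A is assumed created and assembled elsewhere):
.vb
     PetscBool set,issym;
     MatIsSymmetricKnown(A,&set,&issym);
     if (!set) {
       MatIsSymmetric(A,0.0,&issym);   /* flag not set, so check the values explicitly */
     }
.ve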

8592: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8593: @*/
8594: PetscErrorCode MatIsSymmetricKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8595: {
8600:   if (A->symmetric_set) {
8601:     *set = PETSC_TRUE;
8602:     *flg = A->symmetric;
8603:   } else {
8604:     *set = PETSC_FALSE;
8605:   }
8606:   return(0);
8607: }

8611: /*@
8612:    MatIsHermitianKnown - Checks the flag on the matrix to see if it is hermitian.

8614:    Not Collective

8616:    Input Parameter:
8617: .  A - the matrix to check

8619:    Output Parameters:
8620: +  set - if the hermitian flag is set (this tells you if the next flag is valid)
8621: -  flg - the result

8623:    Level: advanced

8625:    Concepts: matrix^symmetry

8627:    Note: Does not check the matrix values directly, so this may return unknown (set = PETSC_FALSE). Use MatIsHermitian()
8628:          if you want it explicitly checked

8630: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsStructurallySymmetric(), MatSetOption(), MatIsSymmetric()
8631: @*/
8632: PetscErrorCode MatIsHermitianKnown(Mat A,PetscBool  *set,PetscBool  *flg)
8633: {
8638:   if (A->hermitian_set) {
8639:     *set = PETSC_TRUE;
8640:     *flg = A->hermitian;
8641:   } else {
8642:     *set = PETSC_FALSE;
8643:   }
8644:   return(0);
8645: }

8649: /*@
8650:    MatIsStructurallySymmetric - Test whether a matrix is structurally symmetric

8652:    Collective on Mat

8654:    Input Parameter:
8655: .  A - the matrix to test

8657:    Output Parameters:
8658: .  flg - the result

8660:    Level: intermediate

8662:    Concepts: matrix^symmetry

8664: .seealso: MatTranspose(), MatIsTranspose(), MatIsHermitian(), MatIsSymmetric(), MatSetOption()
8665: @*/
8666: PetscErrorCode MatIsStructurallySymmetric(Mat A,PetscBool  *flg)
8667: {

8673:   if (!A->structurally_symmetric_set) {
8674:     if (!A->ops->isstructurallysymmetric) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix does not support checking for structural symmetry");
8675:     (*A->ops->isstructurallysymmetric)(A,&A->structurally_symmetric);

8677:     A->structurally_symmetric_set = PETSC_TRUE;
8678:   }
8679:   *flg = A->structurally_symmetric;
8680:   return(0);
8681: }

8685: extern PetscErrorCode MatStashGetInfo_Private(MatStash*,PetscInt*,PetscInt*);
8686: /*@
8687:    MatStashGetInfo - Gets how many values are currently in the matrix stash, i.e. need
8688:        to be communicated to other processors during the MatAssemblyBegin/End() process

8690:     Not collective

8692:    Input Parameter:
8693: .   mat - the matrix

8695:    Output Parameters:
8696: +   nstash   - the size of the stash
8697: .   reallocs - the number of additional mallocs incurred
8698: .   bnstash  - the size of the block stash
8699: -   breallocs - the number of additional mallocs incurred in the block stash

8701:    Level: advanced

8703: .seealso: MatAssemblyBegin(), MatAssemblyEnd(), Mat, MatStashSetInitialSize()

8705: @*/
8706: PetscErrorCode MatStashGetInfo(Mat mat,PetscInt *nstash,PetscInt *reallocs,PetscInt *bnstash,PetscInt *breallocs)
8707: {

8711:   MatStashGetInfo_Private(&mat->stash,nstash,reallocs);
8712:   MatStashGetInfo_Private(&mat->bstash,bnstash,breallocs);
8713:   return(0);
8714: }

8718: /*@C
8719:    MatCreateVecs - Get vector(s) compatible with the matrix, i.e. with the same
8720:      parallel layout

8722:    Collective on Mat

8724:    Input Parameter:
8725: .  mat - the matrix

8727:    Output Parameters:
8728: +   right - (optional) vector that the matrix can be multiplied against
8729: -   left - (optional) vector that the matrix vector product can be stored in

8731:    Notes:
8732:     The blocksize of the returned vectors is determined by the row and column block sizes set with MatSetBlockSizes() or the single blocksize (same for both) set by MatSetBlockSize().

8734:     These are new vectors which are not owned by the Mat; they should be destroyed with VecDestroy() when no longer needed.
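
   Example of Usage (a minimal sketch; A is assumed created and assembled elsewhere):
.vb
     Vec x,y;
     MatCreateVecs(A,&x,&y);   /* x is compatible with A*x, y can hold the result */
     VecSet(x,1.0);
     MatMult(A,x,y);
     VecDestroy(&x);
     VecDestroy(&y);
.ve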

8736:   Level: advanced

8738: .seealso: MatCreate(), VecDestroy()
8739: @*/
8740: PetscErrorCode MatCreateVecs(Mat mat,Vec *right,Vec *left)
8741: {

8747:   if (mat->ops->getvecs) {
8748:     (*mat->ops->getvecs)(mat,right,left);
8749:   } else {
8750:     PetscInt rbs,cbs;
8751:     MatGetBlockSizes(mat,&rbs,&cbs);
8752:     if (right) {
8753:       if (mat->cmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for columns not yet setup");
8754:       VecCreate(PetscObjectComm((PetscObject)mat),right);
8755:       VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
8756:       VecSetBlockSize(*right,cbs);
8757:       VecSetType(*right,VECSTANDARD);
8758:       PetscLayoutReference(mat->cmap,&(*right)->map);
8759:     }
8760:     if (left) {
8761:       if (mat->rmap->n < 0) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"PetscLayout for rows not yet setup");
8762:       VecCreate(PetscObjectComm((PetscObject)mat),left);
8763:       VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
8764:       VecSetBlockSize(*left,rbs);
8765:       VecSetType(*left,VECSTANDARD);
8766:       PetscLayoutReference(mat->rmap,&(*left)->map);
8767:     }
8768:   }
8769:   return(0);
8770: }

8774: /*@C
8775:    MatFactorInfoInitialize - Initializes a MatFactorInfo data structure
8776:      with default values.

8778:    Not Collective

8780:    Input Parameters:
8781: .    info - the MatFactorInfo data structure


8784:    Notes: The solvers are generally used through the KSP and PC objects, for example
8785:           PCLU, PCILU, PCCHOLESKY, PCICC
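
   Example of Usage (a minimal sketch; A is assumed created and assembled elsewhere, and rowperm and colperm are hypothetical names for orderings obtained, e.g., from MatGetOrdering()):
.vb
     MatFactorInfo info;
     MatFactorInfoInitialize(&info);
     info.fill = 3.0;                       /* override the default expected fill */
     MatLUFactor(A,rowperm,colperm,&info);
.ve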

8787:    Level: developer

8789: .seealso: MatFactorInfo

8791:     Developer Note: fortran interface is not autogenerated as the f90
8792:     interface definition cannot be generated correctly [due to MatFactorInfo]

8794: @*/

8796: PetscErrorCode MatFactorInfoInitialize(MatFactorInfo *info)
8797: {

8801:   PetscMemzero(info,sizeof(MatFactorInfo));
8802:   return(0);
8803: }

8807: /*@
8808:    MatFactorSetSchurIS - Set indices corresponding to the Schur complement

8810:    Collective on Mat

8812:    Input Parameters:
8813: +  mat - the factored matrix
8814: -  is - the index set defining the Schur indices (0-based)

8816:    Notes:

8818:    Level: developer

8820:    Concepts:

8822: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement()

8824: @*/
8825: PetscErrorCode MatFactorSetSchurIS(Mat mat,IS is)
8826: {
8827:   PetscErrorCode ierr,(*f)(Mat,IS);

8835:   if (!mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Only for factored matrix");
8836:   PetscObjectQueryFunction((PetscObject)mat,"MatFactorSetSchurIS_C",&f);
8837:   if (!f) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"The selected MatSolverPackage does not support Schur complement computation. You should use MATSOLVERMUMPS or MATSOLVERMKL_PARDISO");
8838:   (*f)(mat,is);
8839:   return(0);
8840: }

8844: /*@
8845:   MatFactorCreateSchurComplement - Create a Schur complement matrix object using Schur data computed during the factorization step

8847:    Logically Collective on Mat

8849:    Input Parameter:
8850: .  F - the factored matrix obtained by calling MatGetFactor() from the PETSc-MUMPS interface

   Output Parameter:
.  S - location where to return the Schur complement (MATDENSE)

8853:    Notes:
8854:    The routine provides a copy of the Schur data stored within the solver's data structures. The caller must destroy the object when it is no longer needed.
8855:    If MatFactorInvertSchurComplement() has been called, the routine returns the inverse of the Schur complement instead.
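
   Example of Usage (a sketch of the typical sequence with the MUMPS interface; A is assumed created and assembled elsewhere, schurIS is a hypothetical name for the index set of Schur indices built elsewhere, and error checking is omitted):
.vb
     Mat           F,S;
     MatFactorInfo info;
     IS            rperm,cperm;
     MatFactorInfoInitialize(&info);
     MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
     MatFactorSetSchurIS(F,schurIS);              /* set before the symbolic factorization */
     MatGetOrdering(A,MATORDERINGNATURAL,&rperm,&cperm);
     MatLUFactorSymbolic(F,A,rperm,cperm,&info);
     MatLUFactorNumeric(F,A,&info);
     MatFactorCreateSchurComplement(F,&S);        /* S is a dense copy owned by the caller */
     MatDestroy(&S);
     ISDestroy(&rperm);ISDestroy(&cperm);
     MatDestroy(&F);
.ve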

8857:    Level: advanced

8859:    References:

8861: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorGetSchurComplement()
8862: @*/
8863: PetscErrorCode MatFactorCreateSchurComplement(Mat F,Mat* S)
8864: {

8869:   PetscUseMethod(F,"MatFactorCreateSchurComplement_C",(Mat,Mat*),(F,S));
8870:   return(0);
8871: }

8875: /*@
8876:   MatFactorGetSchurComplement - Get a Schur complement matrix object using the current Schur data

8878:    Logically Collective on Mat

8880:    Input Parameter:
8881: .  F - the factored matrix obtained by calling MatGetFactor()

   Output Parameter:
.  S - location where to return the Schur complement (in MATDENSE format)

8884:    Notes:
8885:    Schur complement mode is currently implemented for sequential matrices.
8886:    The routine returns a dense matrix pointing to the raw data of the Schur complement stored within the data structures of the solver; e.g. if MatFactorInvertSchurComplement() has been called, the returned matrix is actually the inverse of the Schur complement.
8887:    The caller should call MatFactorRestoreSchurComplement() when the object is no longer needed.

8889:    Level: advanced

8891:    References:

8893: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement()
8894: @*/
8895: PetscErrorCode MatFactorGetSchurComplement(Mat F,Mat* S)
8896: {

8901:   PetscUseMethod(F,"MatFactorGetSchurComplement_C",(Mat,Mat*),(F,S));
8902:   return(0);
8903: }

8907: /*@
8908:   MatFactorRestoreSchurComplement - Restore the Schur complement matrix object obtained from a call to MatFactorGetSchurComplement

8910:    Logically Collective on Mat

8912:    Input Parameters:
8913: +  F - the factored matrix obtained by calling MatGetFactor()
8914: -  S - location where the Schur complement is stored

8916:    Notes:

8918:    Level: advanced

8920:    References:

8922: .seealso: MatGetFactor(), MatFactorSetSchurIS(), MatFactorRestoreSchurComplement(), MatFactorCreateSchurComplement()
8923: @*/
8924: PetscErrorCode MatFactorRestoreSchurComplement(Mat F,Mat* S)
8925: {

8931:   MatDestroy(S);
8932:   return(0);
8933: }

8937: /*@
8938:   MatFactorSolveSchurComplementTranspose - Solve the transpose of the Schur complement system computed during the factorization step

8940:    Logically Collective on Mat

8942:    Input Parameters:
8943: +  F - the factored matrix obtained by calling MatGetFactor()
8944: .  rhs - location where the right hand side of the Schur complement system is stored
8945: -  sol - location where the solution of the Schur complement system has to be returned

8947:    Notes:
8948:    The sizes of the vectors should match the size of the Schur complement

8950:    Level: advanced

8952:    References:

8954: .seealso: MatGetFactor(), MatFactorSetSchurIS()
8955: @*/
8956: PetscErrorCode MatFactorSolveSchurComplementTranspose(Mat F, Vec rhs, Vec sol)
8957: {

8966:   PetscUseMethod(F,"MatFactorSolveSchurComplementTranspose_C",(Mat,Vec,Vec),(F,rhs,sol));
8967:   return(0);
8968: }

8972: /*@
8973:   MatFactorSolveSchurComplement - Solve the Schur complement system computed during the factorization step

8975:    Logically Collective on Mat

8977:    Input Parameters:
8978: +  F - the factored matrix obtained by calling MatGetFactor()
8979: .  rhs - location where the right hand side of the Schur complement system is stored
8980: -  sol - location where the solution of the Schur complement system has to be returned

8982:    Notes:
8983:    The sizes of the vectors should match the size of the Schur complement

8985:    Level: advanced

8987:    References:

8989: .seealso: MatGetFactor(), MatFactorSetSchurIS()
8990: @*/
8991: PetscErrorCode MatFactorSolveSchurComplement(Mat F, Vec rhs, Vec sol)
8992: {

9001:   PetscUseMethod(F,"MatFactorSolveSchurComplement_C",(Mat,Vec,Vec),(F,rhs,sol));
9002:   return(0);
9003: }

9007: /*@
9008:   MatFactorInvertSchurComplement - Invert the raw Schur data computed during the factorization step

9010:    Logically Collective on Mat

9012:    Input Parameter:
9013: .  F - the factored matrix obtained by calling MatGetFactor()

9015:    Notes:

9017:    Level: advanced

9019:    References:

9021: .seealso: MatGetFactor(), MatFactorSetSchurIS()
9022: @*/
9023: PetscErrorCode MatFactorInvertSchurComplement(Mat F)
9024: {

9029:   PetscUseMethod(F,"MatFactorInvertSchurComplement_C",(Mat),(F));
9030:   return(0);
9031: }


9036: /*@
9037:    MatPtAP - Creates the matrix product C = P^T * A * P

9039:    Neighbor-wise Collective on Mat

9041:    Input Parameters:
9042: +  A - the matrix
9043: .  P - the projection matrix
9044: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9045: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate;
9046:           if the result is a dense matrix this is irrelevant

9048:    Output Parameters:
9049: .  C - the product matrix

9051:    Notes:
9052:    C will be created and must be destroyed by the user with MatDestroy().

9054:    This routine is currently only implemented for pairs of AIJ matrices and classes
9055:    which inherit from AIJ.
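
   Example of Usage (a minimal sketch; A and P are assumed created and assembled elsewhere):
.vb
     Mat C;
     MatPtAP(A,P,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* first call creates C */
     MatPtAP(A,P,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);     /* later, after the values (not the pattern) of A change */
     MatDestroy(&C);
.ve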

9057:    Level: intermediate

9059: .seealso: MatPtAPSymbolic(), MatPtAPNumeric(), MatMatMult(), MatRARt()
9060: @*/
9061: PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
9062: {
9064:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9065:   PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat*);
9066:   PetscErrorCode (*ptap)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;
9067:   PetscBool      viatranspose=PETSC_FALSE,viamatmatmatmult=PETSC_FALSE;

9070:   PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-matptap_viatranspose",&viatranspose,NULL);
9071:   PetscOptionsGetBool(((PetscObject)A)->options,((PetscObject)A)->prefix,"-matptap_viamatmatmatmult",&viamatmatmatmult,NULL);

9075:   MatCheckPreallocated(A,1);
9076:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9077:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9080:   MatCheckPreallocated(P,2);
9081:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9082:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9084:   if (A->rmap->N!= A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix A must be square, %D != %D",A->rmap->N,A->cmap->N);
9085:   if (P->rmap->N != A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9086:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9087:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9089:   if (scall == MAT_REUSE_MATRIX) {
9092:     if (viatranspose || viamatmatmatmult) {
9093:       Mat Pt;
9094:       MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
9095:       if (viamatmatmatmult) {
9096:         MatMatMatMult(Pt,A,P,scall,fill,C);
9097:       } else {
9098:         Mat AP;
9099:         MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
9100:         MatMatMult(Pt,AP,scall,fill,C);
9101:         MatDestroy(&AP);
9102:       }
9103:       MatDestroy(&Pt);
9104:     } else {
9105:       PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9106:       PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9107:       (*(*C)->ops->ptapnumeric)(A,P,*C);
9108:       PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9109:       PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9110:     }
9111:     return(0);
9112:   }

9114:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9115:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9117:   fA = A->ops->ptap;
9118:   fP = P->ops->ptap;
9119:   if (fP == fA) {
9120:     if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",((PetscObject)A)->type_name);
9121:     ptap = fA;
9122:   } else {
9123:     /* dispatch based on the type of A and P from their PetscObject's PetscFunctionLists. */
9124:     char ptapname[256];
9125:     PetscStrcpy(ptapname,"MatPtAP_");
9126:     PetscStrcat(ptapname,((PetscObject)A)->type_name);
9127:     PetscStrcat(ptapname,"_");
9128:     PetscStrcat(ptapname,((PetscObject)P)->type_name);
9129:     PetscStrcat(ptapname,"_C"); /* e.g., ptapname = "MatPtAP_seqdense_seqaij_C" */
9130:     PetscObjectQueryFunction((PetscObject)P,ptapname,&ptap);
9131:     if (!ptap) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
9132:   }

9134:   if (viatranspose || viamatmatmatmult) {
9135:     Mat Pt;
9136:     MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);
9137:     if (viamatmatmatmult) {
9138:       MatMatMatMult(Pt,A,P,scall,fill,C);
9139:       PetscInfo(*C,"MatPtAP via MatMatMatMult\n");
9140:     } else {
9141:       Mat AP;
9142:       MatMatMult(A,P,MAT_INITIAL_MATRIX,fill,&AP);
9143:       MatMatMult(Pt,AP,scall,fill,C);
9144:       MatDestroy(&AP);
9145:       PetscInfo(*C,"MatPtAP via MatTranspose and MatMatMult\n");
9146:     }
9147:     MatDestroy(&Pt);
9148:   } else {
9149:     PetscLogEventBegin(MAT_PtAP,A,P,0,0);
9150:     (*ptap)(A,P,scall,fill,C);
9151:     PetscLogEventEnd(MAT_PtAP,A,P,0,0);
9152:   }
9153:   return(0);
9154: }

9158: /*@
9159:    MatPtAPNumeric - Computes the matrix product C = P^T * A * P

9161:    Neighbor-wise Collective on Mat

9163:    Input Parameters:
9164: +  A - the matrix
9165: -  P - the projection matrix

9167:    Output Parameters:
9168: .  C - the product matrix

9170:    Notes:
9171:    C must have been created by calling MatPtAPSymbolic() and must be destroyed by
9172:    the user using MatDestroy().

9174:    This routine is currently only implemented for pairs of AIJ matrices and classes
9175:    which inherit from AIJ.  C will be of type MATAIJ.

9177:    Level: intermediate

9179: .seealso: MatPtAP(), MatPtAPSymbolic(), MatMatMultNumeric()
9180: @*/
9181: PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
9182: {

9188:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9189:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9192:   MatCheckPreallocated(P,2);
9193:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9194:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9197:   MatCheckPreallocated(C,3);
9198:   if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9199:   if (P->cmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->rmap->N);
9200:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9201:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9202:   if (P->cmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->cmap->N,C->cmap->N);
9203:   MatCheckPreallocated(A,1);

9205:   PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
9206:   (*C->ops->ptapnumeric)(A,P,C);
9207:   PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
9208:   return(0);
9209: }

9213: /*@
9214:    MatPtAPSymbolic - Creates the (i,j) structure of the matrix product C = P^T * A * P

9216:    Neighbor-wise Collective on Mat

9218:    Input Parameters:
9219: +  A - the matrix
9220: .  P - the projection matrix
-  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)), use PETSC_DEFAULT if you do not have a good estimate

9222:    Output Parameters:
9223: .  C - the (i,j) structure of the product matrix

9225:    Notes:
9226:    C will be created and must be destroyed by the user with MatDestroy().

9228:    This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9229:    which inherit from SeqAIJ.  C will be of type MATSEQAIJ.  The product is computed using
9230:    this (i,j) structure by calling MatPtAPNumeric().

9232:    Level: intermediate

9234: .seealso: MatPtAP(), MatPtAPNumeric(), MatMatMultSymbolic()
9235: @*/
9236: PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C)
9237: {

9243:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9244:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9245:   if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9248:   MatCheckPreallocated(P,2);
9249:   if (!P->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9250:   if (P->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9253:   if (P->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",P->rmap->N,A->cmap->N);
9254:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9255:   MatCheckPreallocated(A,1);
9256:   PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
9257:   (*A->ops->ptapsymbolic)(A,P,fill,C);
9258:   PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);

9260:   /* MatSetBlockSize(*C,A->rmap->bs); NO! this is not always true -ma */
9261:   return(0);
9262: }

9266: /*@
9267:    MatRARt - Creates the matrix product C = R * A * R^T

9269:    Neighbor-wise Collective on Mat

9271:    Input Parameters:
9272: +  A - the matrix
9273: .  R - the projection matrix
9274: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9275: -  fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate;
9276:           if the result is a dense matrix this is irrelevant

9278:    Output Parameters:
9279: .  C - the product matrix

9281:    Notes:
9282:    C will be created and must be destroyed by the user with MatDestroy().

9284:    This routine is currently only implemented for pairs of AIJ matrices and classes
9285:    which inherit from AIJ.

9287:    Level: intermediate

9289: .seealso: MatRARtSymbolic(), MatRARtNumeric(), MatMatMult(), MatPtAP()
9290: @*/
9291: PetscErrorCode MatRARt(Mat A,Mat R,MatReuse scall,PetscReal fill,Mat *C)
9292: {

9298:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9299:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9302:   MatCheckPreallocated(R,2);
9303:   if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9304:   if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9306:   if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)R),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);

9308:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9309:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9310:   MatCheckPreallocated(A,1);

9312:   if (!A->ops->rart) {
9313:     MatType mattype;
9314:     MatGetType(A,&mattype);
9315:     SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix of type <%s> does not support RARt",mattype);
9316:   }
9317:   PetscLogEventBegin(MAT_RARt,A,R,0,0);
9318:   (*A->ops->rart)(A,R,scall,fill,C);
9319:   PetscLogEventEnd(MAT_RARt,A,R,0,0);
9320:   return(0);
9321: }

9325: /*@
9326:    MatRARtNumeric - Computes the matrix product C = R * A * R^T

9328:    Neighbor-wise Collective on Mat

9330:    Input Parameters:
9331: +  A - the matrix
9332: -  R - the projection matrix

9334:    Output Parameters:
9335: .  C - the product matrix

9337:    Notes:
9338:    C must have been created by calling MatRARtSymbolic and must be destroyed by
9339:    the user using MatDestroy().

9341:    This routine is currently only implemented for pairs of AIJ matrices and classes
9342:    which inherit from AIJ.  C will be of type MATAIJ.

9344:    Level: intermediate

9346: .seealso: MatRARt(), MatRARtSymbolic(), MatMatMultNumeric()
9347: @*/
9348: PetscErrorCode MatRARtNumeric(Mat A,Mat R,Mat C)
9349: {

9355:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9356:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9359:   MatCheckPreallocated(R,2);
9360:   if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9361:   if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9364:   MatCheckPreallocated(C,3);
9365:   if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9366:   if (R->rmap->N!=C->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->rmap->N);
9367:   if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9368:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9369:   if (R->rmap->N!=C->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->rmap->N,C->cmap->N);
9370:   MatCheckPreallocated(A,1);

9372:   PetscLogEventBegin(MAT_RARtNumeric,A,R,0,0);
9373:   (*A->ops->rartnumeric)(A,R,C);
9374:   PetscLogEventEnd(MAT_RARtNumeric,A,R,0,0);
9375:   return(0);
9376: }

9380: /*@
9381:    MatRARtSymbolic - Creates the (i,j) structure of the matrix product C = R * A * R^T

9383:    Neighbor-wise Collective on Mat

9385:    Input Parameters:
9386: +  A - the matrix
9387: .  R - the projection matrix
-  fill - expected fill as ratio of nnz(C)/nnz(A), use PETSC_DEFAULT if you do not have a good estimate

9389:    Output Parameters:
9390: .  C - the (i,j) structure of the product matrix

9392:    Notes:
9393:    C will be created and must be destroyed by the user with MatDestroy().

9395:    This routine is currently only implemented for pairs of SeqAIJ matrices and classes
9396:    which inherit from SeqAIJ.  C will be of type MATSEQAIJ.  The product is computed using
9397:    this (i,j) structure by calling MatRARtNumeric().

9399:    Level: intermediate

9401: .seealso: MatRARt(), MatRARtNumeric(), MatMatMultSymbolic()
9402: @*/
9403: PetscErrorCode MatRARtSymbolic(Mat A,Mat R,PetscReal fill,Mat *C)
9404: {

9410:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9411:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9412:   if (fill <1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9415:   MatCheckPreallocated(R,2);
9416:   if (!R->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9417:   if (R->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9420:   if (R->cmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",R->cmap->N,A->rmap->N);
9421:   if (A->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %D != %D",A->rmap->N,A->cmap->N);
9422:   MatCheckPreallocated(A,1);
9423:   PetscLogEventBegin(MAT_RARtSymbolic,A,R,0,0);
9424:   (*A->ops->rartsymbolic)(A,R,fill,C);
9425:   PetscLogEventEnd(MAT_RARtSymbolic,A,R,0,0);

9427:   MatSetBlockSizes(*C,PetscAbs(R->rmap->bs),PetscAbs(R->rmap->bs));
9428:   return(0);
9429: }

9433: /*@
9434:    MatMatMult - Performs Matrix-Matrix Multiplication C=A*B.

9436:    Neighbor-wise Collective on Mat

9438:    Input Parameters:
9439: +  A - the left matrix
9440: .  B - the right matrix
9441: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9442: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate;
9443:           if the result is a dense matrix this is irrelevant

9445:    Output Parameters:
9446: .  C - the product matrix

9448:    Notes:
9449:    Unless scall is MAT_REUSE_MATRIX C will be created.

9451:    MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call

9453:    To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9454:    actually needed.

9456:    If you have many matrices with the same non-zero structure to multiply, you
9457:    should either
9458: $   1) use MAT_REUSE_MATRIX in all calls but the first or
9459: $   2) call MatMatMultSymbolic() once and then MatMatMultNumeric() for each product needed
9460:    In the special case where matrix B (and hence C) are dense you can create the correctly sized matrix C yourself and then call this routine
9461:    with MAT_REUSE_MATRIX, rather than first having MatMatMult() create it for you. You can NEVER do this if the matrix C is sparse.
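
   Example of Usage (a minimal sketch; A and B are assumed created and assembled elsewhere):
.vb
     Mat C;
     MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* first call creates C */
     MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);     /* later, after the values (not the patterns) of A or B change */
     MatDestroy(&C);
.ve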

9463:    Level: intermediate

9465: .seealso: MatMatMultSymbolic(), MatMatMultNumeric(), MatTransposeMatMult(),  MatMatTransposeMult(), MatPtAP()
9466: @*/
9467: PetscErrorCode MatMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9468: {
9470:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9471:   PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9472:   PetscErrorCode (*mult)(Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;

9477:   MatCheckPreallocated(A,1);
9478:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9479:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9482:   MatCheckPreallocated(B,2);
9483:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9484:   if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9486:   if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9487:   if (scall == MAT_REUSE_MATRIX) {
9490:     PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9491:     PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);
9492:     (*(*C)->ops->matmultnumeric)(A,B,*C);
9493:     PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);
9494:     PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9495:     return(0);
9496:   }
9497:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9498:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9500:   fA = A->ops->matmult;
9501:   fB = B->ops->matmult;
9502:   if (fB == fA) {
9503:     if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMult not supported for B of type %s",((PetscObject)B)->type_name);
9504:     mult = fB;
9505:   } else {
9506:     /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9507:     char multname[256];
9508:     PetscStrcpy(multname,"MatMatMult_");
9509:     PetscStrcat(multname,((PetscObject)A)->type_name);
9510:     PetscStrcat(multname,"_");
9511:     PetscStrcat(multname,((PetscObject)B)->type_name);
9512:     PetscStrcat(multname,"_C"); /* e.g., multname = "MatMatMult_seqdense_seqaij_C" */
9513:     PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9514:     if (!mult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9515:   }
9516:   PetscLogEventBegin(MAT_MatMult,A,B,0,0);
9517:   (*mult)(A,B,scall,fill,C);
9518:   PetscLogEventEnd(MAT_MatMult,A,B,0,0);
9519:   return(0);
9520: }

9524: /*@
9525:    MatMatMultSymbolic - Performs construction, preallocation, and computes the ij structure
9526:    of the matrix-matrix product C=A*B.  Call this routine before calling MatMatMultNumeric().

9528:    Neighbor-wise Collective on Mat

9530:    Input Parameters:
9531: +  A - the left matrix
9532: .  B - the right matrix
9533: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if you do not have a good estimate;
9534:       if C is a dense matrix this is irrelevant

9536:    Output Parameters:
9537: .  C - the product matrix

9539:    Notes:
9540:    C will be created and must be destroyed by the user with MatDestroy().

9542:    To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9543:    actually needed.

9545:    This routine is currently implemented for
9546:     - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type AIJ
9547:     - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9548:     - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.

9550:    Level: intermediate

9552:    Developers Note: There are ways to estimate the number of nonzeros in the resulting product, see for example, http://arxiv.org/abs/1006.4173
9553:      We should incorporate them into PETSc.
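
   Example of Usage (a minimal sketch; A and B are assumed created and assembled elsewhere):
.vb
     Mat C;
     MatMatMultSymbolic(A,B,PETSC_DEFAULT,&C);   /* allocate C and compute its nonzero structure once */
     MatMatMultNumeric(A,B,C);                   /* fill in the numerical values */
     MatMatMultNumeric(A,B,C);                   /* repeat after the values (not the patterns) of A or B change */
     MatDestroy(&C);
.ve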

9555: .seealso: MatMatMult(), MatMatMultNumeric()
9556: @*/
9557: PetscErrorCode MatMatMultSymbolic(Mat A,Mat B,PetscReal fill,Mat *C)
9558: {
9560:   PetscErrorCode (*Asymbolic)(Mat,Mat,PetscReal,Mat*);
9561:   PetscErrorCode (*Bsymbolic)(Mat,Mat,PetscReal,Mat*);
9562:   PetscErrorCode (*symbolic)(Mat,Mat,PetscReal,Mat*)=NULL;

9567:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9568:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9572:   MatCheckPreallocated(B,2);
9573:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9574:   if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

9577:   if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9578:   if (fill == PETSC_DEFAULT) fill = 2.0;
9579:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9580:   MatCheckPreallocated(A,1);

9582:   Asymbolic = A->ops->matmultsymbolic;
9583:   Bsymbolic = B->ops->matmultsymbolic;
9584:   if (Asymbolic == Bsymbolic) {
9585:     if (!Bsymbolic) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"C=A*B not implemented for B of type %s",((PetscObject)B)->type_name);
9586:     symbolic = Bsymbolic;
9587:   } else { /* dispatch based on the type of A and B */
9588:     char symbolicname[256];
9589:     PetscStrcpy(symbolicname,"MatMatMultSymbolic_");
9590:     PetscStrcat(symbolicname,((PetscObject)A)->type_name);
9591:     PetscStrcat(symbolicname,"_");
9592:     PetscStrcat(symbolicname,((PetscObject)B)->type_name);
9593:     PetscStrcat(symbolicname,"_C");
9594:     PetscObjectQueryFunction((PetscObject)B,symbolicname,&symbolic);
9595:     if (!symbolic) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMultSymbolic requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9596:   }
9597:   PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);
9598:   (*symbolic)(A,B,fill,C);
9599:   PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);
9600:   return(0);
9601: }

9605: /*@
9606:    MatMatMultNumeric - Performs the numeric matrix-matrix product.
9607:    Call this routine after first calling MatMatMultSymbolic().

9609:    Neighbor-wise Collective on Mat

9611:    Input Parameters:
9612: +  A - the left matrix
9613: -  B - the right matrix

9615:    Output Parameters:
9616: .  C - the product matrix, which was created by from MatMatMultSymbolic() or a call to MatMatMult().

9618:    Notes:
9619:    C must have been created with MatMatMultSymbolic().

9621:    This routine is currently implemented for
9622:     - pairs of AIJ matrices and classes which inherit from AIJ, C will be of type MATAIJ.
9623:     - pairs of AIJ (A) and Dense (B) matrix, C will be of type Dense.
9624:     - pairs of Dense (A) and AIJ (B) matrix, C will be of type Dense.

9626:    Level: intermediate

9628: .seealso: MatMatMult(), MatMatMultSymbolic()
9629: @*/
9630: PetscErrorCode MatMatMultNumeric(Mat A,Mat B,Mat C)
9631: {

9635:   MatMatMult(A,B,MAT_REUSE_MATRIX,0.0,&C);
9636:   return(0);
9637: }

9641: /*@
9642:    MatMatTransposeMult - Performs Matrix-Matrix Multiplication C=A*B^T.

9644:    Neighbor-wise Collective on Mat

9646:    Input Parameters:
9647: +  A - the left matrix
9648: .  B - the right matrix
9649: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9650: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known

9652:    Output Parameters:
9653: .  C - the product matrix

9655:    Notes:
9656:    C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().

9658:    MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call

9660:   To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9661:    actually needed.

9663:    This routine is currently only implemented for pairs of SeqAIJ matrices.  C will be of type MATSEQAIJ.

9665:    Level: intermediate

9667: .seealso: MatMatTransposeMultSymbolic(), MatMatTransposeMultNumeric(), MatMatMult(), MatTransposeMatMult() MatPtAP()
9668: @*/
9669: PetscErrorCode MatMatTransposeMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9670: {
9672:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9673:   PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);

9678:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9679:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9682:   MatCheckPreallocated(B,2);
9683:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9684:   if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9686:   if (B->cmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, AN %D != BN %D",A->cmap->N,B->cmap->N);
9687:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9688:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9689:   MatCheckPreallocated(A,1);

9691:   fA = A->ops->mattransposemult;
9692:   if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for A of type %s",((PetscObject)A)->type_name);
9693:   fB = B->ops->mattransposemult;
9694:   if (!fB) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatTransposeMult not supported for B of type %s",((PetscObject)B)->type_name);
9695:   if (fB!=fA) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatTransposeMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);

9697:   PetscLogEventBegin(MAT_MatTransposeMult,A,B,0,0);
9698:   if (scall == MAT_INITIAL_MATRIX) {
9699:     PetscLogEventBegin(MAT_MatTransposeMultSymbolic,A,B,0,0);
9700:     (*A->ops->mattransposemultsymbolic)(A,B,fill,C);
9701:     PetscLogEventEnd(MAT_MatTransposeMultSymbolic,A,B,0,0);
9702:   }
9703:   PetscLogEventBegin(MAT_MatTransposeMultNumeric,A,B,0,0);
9704:   (*A->ops->mattransposemultnumeric)(A,B,*C);
9705:   PetscLogEventEnd(MAT_MatTransposeMultNumeric,A,B,0,0);
9706:   PetscLogEventEnd(MAT_MatTransposeMult,A,B,0,0);
9707:   return(0);
9708: }

9712: /*@
9713:    MatTransposeMatMult - Performs Matrix-Matrix Multiplication C=A^T*B.

9715:    Neighbor-wise Collective on Mat

9717:    Input Parameters:
9718: +  A - the left matrix
9719: .  B - the right matrix
9720: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9721: -  fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(B)), use PETSC_DEFAULT if not known

9723:    Output Parameters:
9724: .  C - the product matrix

9726:    Notes:
9727:    C will be created if MAT_INITIAL_MATRIX and must be destroyed by the user with MatDestroy().

9729:    MAT_REUSE_MATRIX can only be used if the matrices A and B have the same nonzero pattern as in the previous call

9731:   To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9732:    actually needed.

9734:    This routine is currently implemented for pairs of AIJ matrices and pairs of SeqDense matrices and classes
9735:    which inherit from SeqAIJ.  C will be of the same type as the input matrices.
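
   Example of Usage (a minimal sketch; A and B are assumed created and assembled elsewhere):
.vb
     Mat C;
     MatTransposeMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);   /* C = A^T * B */
     MatDestroy(&C);
.ve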

9737:    Level: intermediate

9739: .seealso: MatTransposeMatMultSymbolic(), MatTransposeMatMultNumeric(), MatMatMult(), MatMatTransposeMult(), MatPtAP()
9740: @*/
9741: PetscErrorCode MatTransposeMatMult(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
9742: {
9744:   PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat*);
9745:   PetscErrorCode (*fB)(Mat,Mat,MatReuse,PetscReal,Mat*);
9746:   PetscErrorCode (*transposematmult)(Mat,Mat,MatReuse,PetscReal,Mat*) = NULL;

9751:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9752:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9755:   MatCheckPreallocated(B,2);
9756:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9757:   if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9759:   if (B->rmap->N!=A->rmap->N) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->rmap->N);
9760:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9761:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);
9762:   MatCheckPreallocated(A,1);

9764:   fA = A->ops->transposematmult;
9765:   fB = B->ops->transposematmult;
9766:   if (fB==fA) {
9767:     if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatTransposeMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9768:     transposematmult = fA;
9769:   } else {
9770:     /* dispatch based on the type of A and B from their PetscObject's PetscFunctionLists. */
9771:     char multname[256];
9772:     PetscStrcpy(multname,"MatTransposeMatMult_");
9773:     PetscStrcat(multname,((PetscObject)A)->type_name);
9774:     PetscStrcat(multname,"_");
9775:     PetscStrcat(multname,((PetscObject)B)->type_name);
9776:     PetscStrcat(multname,"_C"); /* e.g., multname = "MatTransposeMatMult_seqdense_seqaij_C" */
9777:     PetscObjectQueryFunction((PetscObject)B,multname,&transposematmult);
9778:     if (!transposematmult) SETERRQ2(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatTransposeMatMult requires A, %s, to be compatible with B, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name);
9779:   }
9780:   PetscLogEventBegin(MAT_TransposeMatMult,A,B,0,0);
9781:   (*transposematmult)(A,B,scall,fill,C);
9782:   PetscLogEventEnd(MAT_TransposeMatMult,A,B,0,0);
9783:   return(0);
9784: }

9788: /*@
9789:    MatMatMatMult - Performs Matrix-Matrix-Matrix Multiplication D=A*B*C.

9791:    Neighbor-wise Collective on Mat

9793:    Input Parameters:
9794: +  A - the left matrix
9795: .  B - the middle matrix
9796: .  C - the right matrix
9797: .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
9798: -  fill - expected fill as ratio of nnz(D)/(nnz(A) + nnz(B) + nnz(C)), use PETSC_DEFAULT if you do not have a good estimate;
9799:           if the result is a dense matrix this is irrelevant

9801:    Output Parameter:
9802: .  D - the product matrix

9804:    Notes:
9805:    Unless scall is MAT_REUSE_MATRIX, D will be created; it must be destroyed by the user with MatDestroy().

9807:    MAT_REUSE_MATRIX can only be used if the matrices A, B and C have the same nonzero pattern as in the previous call

9809:    To determine the correct fill value, run with -info and search for the string "Fill ratio" to see the value
9810:    actually needed.

9812:    If you have many matrices with the same nonzero structure to multiply, you
9813:    should use MAT_REUSE_MATRIX in all calls but the first.

9815:    Level: intermediate

9817: .seealso: MatMatMult(), MatPtAP()
9818: @*/
9819: PetscErrorCode MatMatMatMult(Mat A,Mat B,Mat C,MatReuse scall,PetscReal fill,Mat *D)
9820: {
9822:   PetscErrorCode (*fA)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9823:   PetscErrorCode (*fB)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9824:   PetscErrorCode (*fC)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
9825:   PetscErrorCode (*mult)(Mat,Mat,Mat,MatReuse,PetscReal,Mat*)=NULL;

9830:   MatCheckPreallocated(A,1);
9831:   if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9832:   if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9835:   MatCheckPreallocated(B,2);
9836:   if (!B->assembled) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9837:   if (B->factortype) SETERRQ(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9840:   MatCheckPreallocated(C,3);
9841:   if (!C->assembled) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9842:   if (C->factortype) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9843:   if (B->rmap->N!=A->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)B),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",B->rmap->N,A->cmap->N);
9844:   if (C->rmap->N!=B->cmap->N) SETERRQ2(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %D != %D",C->rmap->N,B->cmap->N);
9845:   if (scall == MAT_REUSE_MATRIX) {
9848:     PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9849:     (*(*D)->ops->matmatmult)(A,B,C,scall,fill,D);
9850:     PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9851:     return(0);
9852:   }
9853:   if (fill == PETSC_DEFAULT || fill == PETSC_DECIDE) fill = 2.0;
9854:   if (fill < 1.0) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Expected fill=%g must be >= 1.0",(double)fill);

9856:   fA = A->ops->matmatmult;
9857:   fB = B->ops->matmatmult;
9858:   fC = C->ops->matmatmult;
9859:   if (fA == fB && fA == fC) {
9860:     if (!fA) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MatMatMatMult not supported for A of type %s",((PetscObject)A)->type_name);
9861:     mult = fA;
9862:   } else {
9863:     /* dispatch based on the type of A, B and C from their PetscObject's PetscFunctionLists. */
9864:     char multname[256];
9865:     PetscStrcpy(multname,"MatMatMatMult_");
9866:     PetscStrcat(multname,((PetscObject)A)->type_name);
9867:     PetscStrcat(multname,"_");
9868:     PetscStrcat(multname,((PetscObject)B)->type_name);
9869:     PetscStrcat(multname,"_");
9870:     PetscStrcat(multname,((PetscObject)C)->type_name);
9871:     PetscStrcat(multname,"_C");
9872:     PetscObjectQueryFunction((PetscObject)B,multname,&mult);
9873:     if (!mult) SETERRQ3(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_INCOMP,"MatMatMatMult requires A, %s, to be compatible with B, %s, C, %s",((PetscObject)A)->type_name,((PetscObject)B)->type_name,((PetscObject)C)->type_name);
9874:   }
9875:   PetscLogEventBegin(MAT_MatMatMult,A,B,0,0);
9876:   (*mult)(A,B,C,scall,fill,D);
9877:   PetscLogEventEnd(MAT_MatMatMult,A,B,0,0);
9878:   return(0);
9879: }
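
/*
   Editorial usage sketch, not part of the original matrix.c: computes D = A*B*C once with
   MAT_INITIAL_MATRIX and again with MAT_REUSE_MATRIX after only the numerical values of the
   inputs have changed.  The helper name is hypothetical; A, B and C are assumed assembled and
   dimensionally compatible.
*/
static PetscErrorCode ExampleMatMatMatMult(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat            D;

  PetscFunctionBegin;
  ierr = MatMatMatMult(A,B,C,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&D);CHKERRQ(ierr);
  /* ... update the values of A, B and/or C, keeping their nonzero patterns ... */
  ierr = MatMatMatMult(A,B,C,MAT_REUSE_MATRIX,PETSC_DEFAULT,&D);CHKERRQ(ierr);
  ierr = MatDestroy(&D);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}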

9883: /*@
9884:    MatCreateRedundantMatrix - Create redundant matrices and put them into processors of subcommunicators.

9886:    Collective on Mat

9888:    Input Parameters:
9889: +  mat - the matrix
9890: .  nsubcomm - the number of subcommunicators (= number of redundant parallel or sequential matrices)
9891: .  subcomm - MPI communicator split from the communicator in which mat resides (or MPI_COMM_NULL, in which case it is created internally from nsubcomm)
9892: -  reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

9894:    Output Parameter:
9895: .  matredundant - redundant matrix

9897:    Notes:
9898:    MAT_REUSE_MATRIX can only be used when the nonzero structure of the
9899:    original matrix has not changed since the last call to MatCreateRedundantMatrix().

9901:    This routine creates the duplicated matrices in subcommunicators; you should NOT create them before
9902:    calling it.

9904:    Level: advanced

9906:    Concepts: subcommunicator
9907:    Concepts: duplicate matrix

9909: .seealso: MatDestroy()
9910: @*/
9911: PetscErrorCode MatCreateRedundantMatrix(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,MatReuse reuse,Mat *matredundant)
9912: {
9914:   MPI_Comm       comm;
9915:   PetscMPIInt    size;
9916:   PetscInt       mloc_sub,rstart,rend,M=mat->rmap->N,N=mat->cmap->N,bs=mat->rmap->bs;
9917:   Mat_Redundant  *redund=NULL;
9918:   PetscSubcomm   psubcomm=NULL;
9919:   MPI_Comm       subcomm_in=subcomm;
9920:   Mat            *matseq;
9921:   IS             isrow,iscol;
9922:   PetscBool      newsubcomm=PETSC_FALSE;

9925:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);
9926:   if (size == 1 || nsubcomm == 1) {
9927:     if (reuse == MAT_INITIAL_MATRIX) {
9928:       MatDuplicate(mat,MAT_COPY_VALUES,matredundant);
9929:     } else {
9930:       MatCopy(mat,*matredundant,SAME_NONZERO_PATTERN);
9931:     }
9932:     return(0);
9933:   }

9936:   if (nsubcomm && reuse == MAT_REUSE_MATRIX) {
9939:   }
9940:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
9941:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
9942:   MatCheckPreallocated(mat,1);

9944:   PetscLogEventBegin(MAT_RedundantMat,mat,0,0,0);
9945:   if (subcomm_in == MPI_COMM_NULL && reuse == MAT_INITIAL_MATRIX) { /* get subcomm if user does not provide subcomm */
9946:     /* create psubcomm, then get subcomm */
9947:     PetscObjectGetComm((PetscObject)mat,&comm);
9948:     MPI_Comm_size(comm,&size);
9949:     if (nsubcomm < 1 || nsubcomm > size) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"nsubcomm must be between 1 and %D",size);

9951:     PetscSubcommCreate(comm,&psubcomm);
9952:     PetscSubcommSetNumber(psubcomm,nsubcomm);
9953:     PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);
9954:     PetscSubcommSetFromOptions(psubcomm);
9955:     PetscCommDuplicate(PetscSubcommChild(psubcomm),&subcomm,NULL);
9956:     newsubcomm = PETSC_TRUE;
9957:     PetscSubcommDestroy(&psubcomm);
9958:   }

9960:   /* get isrow, iscol and a local sequential matrix matseq[0] */
9961:   if (reuse == MAT_INITIAL_MATRIX) {
9962:     mloc_sub = PETSC_DECIDE;
9963:     if (bs < 1) {
9964:       PetscSplitOwnership(subcomm,&mloc_sub,&M);
9965:     } else {
9966:       PetscSplitOwnershipBlock(subcomm,bs,&mloc_sub,&M);
9967:     }
9968:     MPI_Scan(&mloc_sub,&rend,1,MPIU_INT,MPI_SUM,subcomm);
9969:     rstart = rend - mloc_sub;
9970:     ISCreateStride(PETSC_COMM_SELF,mloc_sub,rstart,1,&isrow);
9971:     ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);
9972:   } else { /* reuse == MAT_REUSE_MATRIX */
9973:     /* retrieve subcomm */
9974:     PetscObjectGetComm((PetscObject)(*matredundant),&subcomm);
9975:     redund = (*matredundant)->redundant;
9976:     isrow  = redund->isrow;
9977:     iscol  = redund->iscol;
9978:     matseq = redund->matseq;
9979:   }
9980:   MatGetSubMatrices(mat,1,&isrow,&iscol,reuse,&matseq);

9982:   /* get matredundant over subcomm */
9983:   if (reuse == MAT_INITIAL_MATRIX) {
9984:     MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],mloc_sub,reuse,matredundant);

9986:     /* create a supporting struct and attach it to C for reuse */
9987:     PetscNewLog(*matredundant,&redund);
9988:     (*matredundant)->redundant = redund;
9989:     redund->isrow              = isrow;
9990:     redund->iscol              = iscol;
9991:     redund->matseq             = matseq;
9992:     if (newsubcomm) {
9993:       redund->subcomm          = subcomm;
9994:     } else {
9995:       redund->subcomm          = MPI_COMM_NULL;
9996:     }
9997:   } else {
9998:     MatCreateMPIMatConcatenateSeqMat(subcomm,matseq[0],PETSC_DECIDE,reuse,matredundant);
9999:   }
10000:   PetscLogEventEnd(MAT_RedundantMat,mat,0,0,0);
10001:   return(0);
10002: }
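
/*
   Editorial usage sketch, not part of the original matrix.c: duplicates a parallel matrix onto
   two subcommunicators, letting the routine build the subcommunicators itself by passing
   MPI_COMM_NULL, and then refreshes the copies after the values of mat change.  The helper name
   and the choice of two subcommunicators are hypothetical.
*/
static PetscErrorCode ExampleRedundantMatrix(Mat mat)
{
  PetscErrorCode ierr;
  Mat            matred;

  PetscFunctionBegin;
  ierr = MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_INITIAL_MATRIX,&matred);CHKERRQ(ierr);
  /* ... change values of mat without changing its nonzero structure ... */
  ierr = MatCreateRedundantMatrix(mat,2,MPI_COMM_NULL,MAT_REUSE_MATRIX,&matred);CHKERRQ(ierr);
  ierr = MatDestroy(&matred);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}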

10006: /*@C
10007:    MatGetMultiProcBlock - Create multiple [bjacobi] 'parallel submatrices' from
10008:    a given 'mat' object. Each submatrix can span multiple procs.

10010:    Collective on Mat

10012:    Input Parameters:
10013: +  mat - the matrix
10014: .  subcomm - the subcommunicator obtained by MPI_Comm_split(comm)
10015: -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

10017:    Output Parameter:
10018: .  subMat - the 'parallel submatrix' spanning the given subComm

10020:   Notes:
10021:   The submatrix partition across processors is dictated by 'subComm', a
10022:   communicator obtained by MPI_Comm_split(comm). The MPI_Comm_split()
10023:   is not restricted to be grouped with consecutive original ranks.

10025:   Due to the MPI_Comm_split() usage, the parallel layout of the submatrices
10026:   maps directly to the layout of the original matrix [with respect to the local
10027:   row/column partitioning]. So the original 'DiagonalMat' naturally maps
10028:   into the 'DiagonalMat' of the subMat, and hence is used directly from
10029:   the subMat. However the offDiagMat loses some columns - and this is
10030:   reconstructed with MatSetValues().

10032:   Level: advanced

10034:   Concepts: subcommunicator
10035:   Concepts: submatrices

10037: .seealso: MatGetSubMatrices()
10038: @*/
10039: PetscErrorCode   MatGetMultiProcBlock(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
10040: {
10042:   PetscMPIInt    commsize,subCommSize;

10045:   MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);
10046:   MPI_Comm_size(subComm,&subCommSize);
10047:   if (subCommSize > commsize) SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_OUTOFRANGE,"CommSize %D < SubCommSize %D",commsize,subCommSize);

10049:   PetscLogEventBegin(MAT_GetMultiProcBlock,mat,0,0,0);
10050:   (*mat->ops->getmultiprocblock)(mat,subComm,scall,subMat);
10051:   PetscLogEventEnd(MAT_GetMultiProcBlock,mat,0,0,0);
10052:   return(0);
10053: }
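
/*
   Editorial usage sketch, not part of the original matrix.c: groups the ranks of mat's
   communicator into pairs with MPI_Comm_split() and extracts the 'parallel submatrix' living on
   each pair.  The helper name and the two-ranks-per-block choice are hypothetical.
*/
static PetscErrorCode ExampleMultiProcBlock(Mat mat)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  MPI_Comm       subComm;
  Mat            subMat;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_split(PetscObjectComm((PetscObject)mat),rank/2,rank,&subComm);CHKERRQ(ierr);
  ierr = MatGetMultiProcBlock(mat,subComm,MAT_INITIAL_MATRIX,&subMat);CHKERRQ(ierr);
  /* ... use subMat, e.g. as the local block of a multi-process block Jacobi preconditioner ... */
  ierr = MatDestroy(&subMat);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subComm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}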

10057: /*@
10058:    MatGetLocalSubMatrix - Gets a reference to a submatrix specified in local numbering

10060:    Not Collective

10062:    Input Arguments:
10063: +  mat - matrix to extract local submatrix from
10064: .  isrow - local row indices for submatrix
10065: -  iscol - local column indices for submatrix

10067:    Output Arguments:
10068: .  submat - the submatrix

10070:    Level: intermediate

10072:    Notes:
10073:    The submat should be returned with MatRestoreLocalSubMatrix().

10075:    Depending on the format of mat, the returned submat may not implement MatMult().  Its communicator may be
10076:    the same as mat, it may be PETSC_COMM_SELF, or some other subcomm of mat's.

10078:    The submat always implements MatSetValuesLocal().  If isrow and iscol have the same block size, then
10079:    MatSetValuesBlockedLocal() will also be implemented.

10081:    The mat must have had an ISLocalToGlobalMapping provided to it with MatSetLocalToGlobalMapping(). Note that
10082:    matrices obtained with DMCreateMatrix() generally already have the local-to-global mapping provided.

10084: .seealso: MatRestoreLocalSubMatrix(), MatCreateLocalRef(), MatSetLocalToGlobalMapping()
10085: @*/
10086: PetscErrorCode MatGetLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10087: {

10096:   if (!mat->rmap->mapping) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Matrix must have local to global mapping provided before this call");
10097: 
10098:   if (mat->ops->getlocalsubmatrix) {
10099:     (*mat->ops->getlocalsubmatrix)(mat,isrow,iscol,submat);
10100:   } else {
10101:     MatCreateLocalRef(mat,isrow,iscol,submat);
10102:   }
10103:   return(0);
10104: }
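
/*
   Editorial usage sketch, not part of the original matrix.c: obtains a submatrix in local
   numbering, adds one entry through MatSetValuesLocal(), and returns it.  It assumes mat already
   has a local-to-global mapping and that the caller built isrow/iscol and will assemble mat
   afterwards; the helper name is hypothetical.
*/
static PetscErrorCode ExampleLocalSubMatrix(Mat mat,IS isrow,IS iscol)
{
  PetscErrorCode ierr;
  Mat            submat;
  PetscInt       row = 0,col = 0;
  PetscScalar    v   = 1.0;

  PetscFunctionBegin;
  ierr = MatGetLocalSubMatrix(mat,isrow,iscol,&submat);CHKERRQ(ierr);
  ierr = MatSetValuesLocal(submat,1,&row,1,&col,&v,ADD_VALUES);CHKERRQ(ierr);
  ierr = MatRestoreLocalSubMatrix(mat,isrow,iscol,&submat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}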

10108: /*@
10109:    MatRestoreLocalSubMatrix - Restores a reference to a submatrix specified in local numbering

10111:    Not Collective

10113:    Input Arguments:
10114: +  mat - matrix to extract local submatrix from
10115: .  isrow - local row indices for submatrix
10116: .  iscol - local column indices for submatrix
10117: -  submat - the submatrix

10119:    Level: intermediate

10121: .seealso: MatGetLocalSubMatrix()
10122: @*/
10123: PetscErrorCode MatRestoreLocalSubMatrix(Mat mat,IS isrow,IS iscol,Mat *submat)
10124: {

10133:   if (*submat) {
10135:   }

10137:   if (mat->ops->restorelocalsubmatrix) {
10138:     (*mat->ops->restorelocalsubmatrix)(mat,isrow,iscol,submat);
10139:   } else {
10140:     MatDestroy(submat);
10141:   }
10142:   *submat = NULL;
10143:   return(0);
10144: }

10146: /* --------------------------------------------------------*/
10149: /*@
10150:    MatFindZeroDiagonals - Finds all the rows of a matrix whose diagonal entry is zero or not stored in the matrix

10152:    Collective on Mat

10154:    Input Parameter:
10155: .  mat - the matrix

10157:    Output Parameter:
10158: .  is - the list of rows (possibly empty) that have a zero or missing diagonal entry

10160:    Level: developer

10162:    Concepts: matrix-vector product

10164: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
10165: @*/
10166: PetscErrorCode MatFindZeroDiagonals(Mat mat,IS *is)
10167: {

10173:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10174:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

10176:   if (!mat->ops->findzerodiagonals) {
10177:     Vec                diag;
10178:     const PetscScalar *a;
10179:     PetscInt          *rows;
10180:     PetscInt           rStart, rEnd, r, nrow = 0;

10182:     MatCreateVecs(mat, &diag, NULL);
10183:     MatGetDiagonal(mat, diag);
10184:     MatGetOwnershipRange(mat, &rStart, &rEnd);
10185:     VecGetArrayRead(diag, &a);
10186:     for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) ++nrow;
10187:     PetscMalloc1(nrow, &rows);
10188:     nrow = 0;
10189:     for (r = 0; r < rEnd-rStart; ++r) if (a[r] == 0.0) rows[nrow++] = r+rStart;
10190:     VecRestoreArrayRead(diag, &a);
10191:     VecDestroy(&diag);
10192:     ISCreateGeneral(PetscObjectComm((PetscObject) mat), nrow, rows, PETSC_OWN_POINTER, is);
10193:   } else {
10194:     (*mat->ops->findzerodiagonals)(mat, is);
10195:   }
10196:   return(0);
10197: }
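
/*
   Editorial usage sketch, not part of the original matrix.c: reports how many local rows of an
   assembled matrix have a zero or missing diagonal entry.  The helper name is hypothetical.
*/
static PetscErrorCode ExampleFindZeroDiagonals(Mat mat)
{
  PetscErrorCode ierr;
  IS             zerodiag;
  PetscInt       n;

  PetscFunctionBegin;
  ierr = MatFindZeroDiagonals(mat,&zerodiag);CHKERRQ(ierr);
  ierr = ISGetLocalSize(zerodiag,&n);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"%D local rows have a zero diagonal\n",n);CHKERRQ(ierr);
  ierr = ISDestroy(&zerodiag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}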

10201: /*@
10202:    MatFindOffBlockDiagonalEntries - Finds all the rows of a matrix that have entries outside of the main diagonal block (defined by the matrix block size)

10204:    Collective on Mat

10206:    Input Parameter:
10207: .  mat - the matrix

10209:    Output Parameter:
10210: .  is - contains the list of rows with off block diagonal entries

10212:    Level: developer

10214:    Concepts: matrix-vector product

10216: .seealso: MatMultTranspose(), MatMultAdd(), MatMultTransposeAdd()
10217: @*/
10218: PetscErrorCode MatFindOffBlockDiagonalEntries(Mat mat,IS *is)
10219: {

10225:   if (!mat->assembled) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10226:   if (mat->factortype) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

10228:   if (!mat->ops->findoffblockdiagonalentries) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"This matrix type does not have a find off block diagonal entries defined");
10229:   (*mat->ops->findoffblockdiagonalentries)(mat,is);
10230:   return(0);
10231: }

10235: /*@C
10236:   MatInvertBlockDiagonal - Inverts the block diagonal entries.

10238:   Collective on Mat

10240:   Input Parameters:
10241: . mat - the matrix

10243:   Output Parameters:
10244: . values - the block inverses in column major order (FORTRAN-like)

10246:    Note:
10247:    This routine is not available from Fortran.

10249:   Level: advanced
10250: @*/
10251: PetscErrorCode MatInvertBlockDiagonal(Mat mat,const PetscScalar **values)
10252: {

10257:   if (!mat->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
10258:   if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
10259:   if (!mat->ops->invertblockdiagonal) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported");
10260:   (*mat->ops->invertblockdiagonal)(mat,values);
10261:   return(0);
10262: }
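
/*
   Editorial usage sketch, not part of the original matrix.c: retrieves the inverted diagonal
   blocks and reads the (0,0) entry of the first local block; each bs-by-bs inverse is stored
   contiguously in column-major order, and the returned array is owned by the matrix (do not
   free it).  It assumes the matrix has at least one local row; the helper name is hypothetical.
*/
static PetscErrorCode ExampleInvertBlockDiagonal(Mat mat)
{
  PetscErrorCode     ierr;
  const PetscScalar *idiag;
  PetscInt           bs;

  PetscFunctionBegin;
  ierr = MatGetBlockSize(mat,&bs);CHKERRQ(ierr);
  ierr = MatInvertBlockDiagonal(mat,&idiag);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"block size %D, first inverse entry %g\n",bs,(double)PetscRealPart(idiag[0]));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}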

10266: /*@C
10267:     MatTransposeColoringDestroy - Destroys a coloring context for matrix product C=A*B^T that was created
10268:     via MatTransposeColoringCreate().

10270:     Collective on MatTransposeColoring

10272:     Input Parameter:
10273: .   c - coloring context

10275:     Level: intermediate

10277: .seealso: MatTransposeColoringCreate()
10278: @*/
10279: PetscErrorCode MatTransposeColoringDestroy(MatTransposeColoring *c)
10280: {
10281:   PetscErrorCode       ierr;
10282:   MatTransposeColoring matcolor=*c;

10285:   if (!matcolor) return(0);
10286:   if (--((PetscObject)matcolor)->refct > 0) {matcolor = 0; return(0);}

10288:   PetscFree3(matcolor->ncolumns,matcolor->nrows,matcolor->colorforrow);
10289:   PetscFree(matcolor->rows);
10290:   PetscFree(matcolor->den2sp);
10291:   PetscFree(matcolor->colorforcol);
10292:   PetscFree(matcolor->columns);
10293:   if (matcolor->brows>0) {
10294:     PetscFree(matcolor->lstart);
10295:   }
10296:   PetscHeaderDestroy(c);
10297:   return(0);
10298: }

10302: /*@C
10303:     MatTransColoringApplySpToDen - Given a symbolic matrix product C=A*B^T for which
10304:     a MatTransposeColoring context has been created, computes a dense B^T by applying
10305:     the MatTransposeColoring to the sparse matrix B.

10307:     Collective on MatTransposeColoring

10309:     Input Parameters:
10310: +   B - sparse matrix B
10311: .   Btdense - symbolic dense matrix B^T
10312: -   coloring - coloring context created with MatTransposeColoringCreate()

10314:     Output Parameter:
10315: .   Btdense - dense matrix B^T

10317:     Options Database Keys:
10318: +    -mat_transpose_coloring_view - Activates basic viewing of the coloring
10319: .    -mat_transpose_coloring_view_draw - Activates drawing of coloring
10320: -    -mat_transpose_coloring_view_info - Activates viewing of coloring info

10322:     Level: intermediate

10324: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy()

10326: .keywords: coloring
10327: @*/
10328: PetscErrorCode MatTransColoringApplySpToDen(MatTransposeColoring coloring,Mat B,Mat Btdense)
10329: {


10337:   if (!B->ops->transcoloringapplysptoden) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)B)->type_name);
10338:   (B->ops->transcoloringapplysptoden)(coloring,B,Btdense);
10339:   return(0);
10340: }

10344: /*@C
10345:     MatTransColoringApplyDenToSp - Given a symbolic matrix product Csp=A*B^T for which
10346:     a MatTransposeColoring context has been created, and a dense matrix Cden=A*Btdense
10347:     in which Btdense is obtained from MatTransColoringApplySpToDen(), recovers the sparse
10348:     matrix Csp from Cden.

10350:     Collective on MatTransposeColoring

10352:     Input Parameters:
10353: +   coloring - coloring context created with MatTransposeColoringCreate()
10354: -   Cden - matrix product of a sparse matrix and a dense matrix Btdense

10356:     Output Parameter:
10357: .   Csp - sparse matrix

10359:     Options Database Keys:
10360: +    -mat_multtranspose_coloring_view - Activates basic viewing of the coloring
10361: .    -mat_multtranspose_coloring_view_draw - Activates drawing of coloring
10362: -    -mat_multtranspose_coloring_view_info - Activates viewing of coloring info

10364:     Level: intermediate

10366: .seealso: MatTransposeColoringCreate(), MatTransposeColoringDestroy(), MatTransColoringApplySpToDen()

10368: .keywords: coloring
10369: @*/
10370: PetscErrorCode MatTransColoringApplyDenToSp(MatTransposeColoring matcoloring,Mat Cden,Mat Csp)
10371: {


10379:   if (!Csp->ops->transcoloringapplydentosp) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported for this matrix type %s",((PetscObject)Csp)->type_name);
10380:   (Csp->ops->transcoloringapplydentosp)(matcoloring,Cden,Csp);
10381:   return(0);
10382: }

10386: /*@C
10387:    MatTransposeColoringCreate - Creates a matrix coloring context for matrix product C=A*B^T.

10389:    Collective on Mat

10391:    Input Parameters:
10392: +  mat - the matrix product C
10393: -  iscoloring - the coloring of the matrix; usually obtained with MatColoringCreate() or DMCreateColoring()

10395:     Output Parameter:
10396: .   color - the new coloring context

10398:     Level: intermediate

10400: .seealso: MatTransposeColoringDestroy(), MatTransposeColoringSetFromOptions(), MatTransColoringApplySpToDen(),
10401:            MatTransColoringApplyDenToSp(), MatTransposeColoringView()
10402: @*/
10403: PetscErrorCode MatTransposeColoringCreate(Mat mat,ISColoring iscoloring,MatTransposeColoring *color)
10404: {
10405:   MatTransposeColoring c;
10406:   MPI_Comm             comm;
10407:   PetscErrorCode       ierr;

10410:   PetscLogEventBegin(MAT_TransposeColoringCreate,mat,0,0,0);
10411:   PetscObjectGetComm((PetscObject)mat,&comm);
10412:   PetscHeaderCreate(c,MAT_TRANSPOSECOLORING_CLASSID,"MatTransposeColoring","Matrix product C=A*B^T via coloring","Mat",comm,MatTransposeColoringDestroy,NULL);

10414:   c->ctype = iscoloring->ctype;
10415:   if (mat->ops->transposecoloringcreate) {
10416:     (*mat->ops->transposecoloringcreate)(mat,iscoloring,c);
10417:   } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Code not yet written for this matrix type");

10419:   *color = c;
10420:   PetscLogEventEnd(MAT_TransposeColoringCreate,mat,0,0,0);
10421:   return(0);
10422: }

10426: /*@
10427:       MatGetNonzeroState - Returns a 64-bit integer representing the current state of nonzeros in the matrix. If the
10428:         matrix has had no new nonzero locations added since the previous call, then the value will be the
10429:         same; otherwise it will be larger.

10431:      Not Collective

10433:   Input Parameter:
10434: .    A  - the matrix

10436:   Output Parameter:
10437: .    state - the current state

10439:   Notes: You can only compare states from two different calls on the SAME matrix; you cannot compare calls between
10440:          different matrices.

10442:   Level: intermediate

10444: @*/
10445: PetscErrorCode MatGetNonzeroState(Mat mat,PetscObjectState *state)
10446: {
10449:   *state = mat->nonzerostate;
10450:   return(0);
10451: }
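
/*
   Editorial usage sketch, not part of the original matrix.c: records the nonzero state of the
   same matrix before and after further assembly to detect whether new nonzero locations were
   introduced.  The helper name is hypothetical.
*/
static PetscErrorCode ExampleNonzeroStateChanged(Mat mat,PetscBool *changed)
{
  PetscErrorCode   ierr;
  PetscObjectState before,after;

  PetscFunctionBegin;
  ierr = MatGetNonzeroState(mat,&before);CHKERRQ(ierr);
  /* ... further MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() calls on mat ... */
  ierr = MatGetNonzeroState(mat,&after);CHKERRQ(ierr);
  *changed = (PetscBool)(after != before);
  PetscFunctionReturn(0);
}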

10455: /*@
10456:       MatCreateMPIMatConcatenateSeqMat - Creates a single large PETSc matrix by concatenating sequential
10457:                  matrices from each processor

10459:     Collective on MPI_Comm

10461:    Input Parameters:
10462: +    comm - the communicator the parallel matrix will live on
10463: .    seqmat - the input sequential matrix on each process
10464: .    n - number of local columns (or PETSC_DECIDE)
10465: -    reuse - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

10467:    Output Parameter:
10468: .    mpimat - the parallel matrix generated

10470:     Level: advanced

10472:    Notes: The number of columns of the matrix in EACH processor MUST be the same.

10474: @*/
10475: PetscErrorCode MatCreateMPIMatConcatenateSeqMat(MPI_Comm comm,Mat seqmat,PetscInt n,MatReuse reuse,Mat *mpimat)
10476: {
10478:   PetscMPIInt    size;

10481:   MPI_Comm_size(comm,&size);
10482:   if (size == 1) {
10483:     if (reuse == MAT_INITIAL_MATRIX) {
10484:       MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);
10485:     } else {
10486:       MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);
10487:     }
10488:     return(0);
10489:   }

10491:   if (!seqmat->ops->creatempimatconcatenateseqmat) SETERRQ1(PetscObjectComm((PetscObject)seqmat),PETSC_ERR_SUP,"Mat type %s",((PetscObject)seqmat)->type_name);
10492:   PetscLogEventBegin(MAT_Merge,seqmat,0,0,0);
10493:   (*seqmat->ops->creatempimatconcatenateseqmat)(comm,seqmat,n,reuse,mpimat);
10494:   PetscLogEventEnd(MAT_Merge,seqmat,0,0,0);
10495:   return(0);
10496: }
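
/*
   Editorial usage sketch, not part of the original matrix.c: stacks one sequential matrix per
   rank into a single parallel matrix, letting PETSc pick the local column layout, and rebuilds
   it after only the values of seqmat change.  It assumes every rank's seqmat has the same number
   of columns; the helper name is hypothetical.
*/
static PetscErrorCode ExampleConcatenateSeqMat(MPI_Comm comm,Mat seqmat,Mat *mpimat)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreateMPIMatConcatenateSeqMat(comm,seqmat,PETSC_DECIDE,MAT_INITIAL_MATRIX,mpimat);CHKERRQ(ierr);
  /* ... change the values of seqmat, keeping its nonzero pattern ... */
  ierr = MatCreateMPIMatConcatenateSeqMat(comm,seqmat,PETSC_DECIDE,MAT_REUSE_MATRIX,mpimat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}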

10500: /*@
10501:      MatSubdomainsCreateCoalesce - Creates index subdomains by coalescing adjacent
10502:                  ranks' ownership ranges.

10504:     Collective on A

10506:    Input Parameters:
10507: +    A   - the matrix to create subdomains from
10508: -    N   - requested number of subdomains


10511:    Output Parameters:
10512: +    n   - number of subdomains resulting on this rank
10513: -    iss - IS list with indices of subdomains on this rank

10515:     Level: advanced

10517:     Notes: The number of subdomains must be smaller than the communicator size.
10518: @*/
10519: PetscErrorCode MatSubdomainsCreateCoalesce(Mat A,PetscInt N,PetscInt *n,IS *iss[])
10520: {
10521:   MPI_Comm        comm,subcomm;
10522:   PetscMPIInt     size,rank,color;
10523:   PetscInt        rstart,rend,k;
10524:   PetscErrorCode  ierr;

10527:   PetscObjectGetComm((PetscObject)A,&comm);
10528:   MPI_Comm_size(comm,&size);
10529:   MPI_Comm_rank(comm,&rank);
10530:   if (N < 1 || N >= (PetscInt)size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and < %D, got N = %D",size,N);
10531:   *n = 1;
10532:   k = ((PetscInt)size)/N + ((PetscInt)size%N>0); /* There are up to k ranks to a color */
10533:   color = rank/k;
10534:   MPI_Comm_split(comm,color,rank,&subcomm);
10535:   PetscMalloc1(1,iss);
10536:   MatGetOwnershipRange(A,&rstart,&rend);
10537:   ISCreateStride(subcomm,rend-rstart,rstart,1,iss[0]);
10538:   MPI_Comm_free(&subcomm);
10539:   return(0);
10540: }
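
/*
   Editorial usage sketch, not part of the original matrix.c: builds N coalesced subdomains from
   a parallel matrix (for example to hand to a domain-decomposition preconditioner) and then
   releases the index sets.  The helper name is hypothetical.
*/
static PetscErrorCode ExampleCoalescedSubdomains(Mat A,PetscInt N)
{
  PetscErrorCode ierr;
  PetscInt       n,i;
  IS             *iss;

  PetscFunctionBegin;
  ierr = MatSubdomainsCreateCoalesce(A,N,&n,&iss);CHKERRQ(ierr);
  /* ... use the n index sets in iss, e.g. as subdomains for a GASM-type preconditioner ... */
  for (i=0; i<n; i++) {ierr = ISDestroy(&iss[i]);CHKERRQ(ierr);}
  ierr = PetscFree(iss);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}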