Actual source code: mumps.c
2: /*
3: Provides an interface to the MUMPS sparse solver
4: */
6: #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
7: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
9: EXTERN_C_BEGIN
10: #if defined(PETSC_USE_COMPLEX)
11: #include <zmumps_c.h>
12: #else
13: #include <dmumps_c.h>
14: #endif
15: EXTERN_C_END
16: #define JOB_INIT -1
17: #define JOB_FACTSYMBOLIC 1
18: #define JOB_FACTNUMERIC 2
19: #define JOB_SOLVE 3
20: #define JOB_END -2
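/*
  For orientation: a MUMPS instance is driven through these job codes in the
  order below (a sketch of the life cycle this file follows; see the MUMPS
  Users' Guide):

    id.job = JOB_INIT;          initialize the instance, set defaults
    id.job = JOB_FACTSYMBOLIC;  analysis: ordering and symbolic factorization
    id.job = JOB_FACTNUMERIC;   numerical factorization
    id.job = JOB_SOLVE;         forward/backward substitution (repeatable)
    id.job = JOB_END;           terminate the instance, free its memory
*/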
23: /* macros s.t. indices match MUMPS documentation */
24: #define ICNTL(I) icntl[(I)-1]
25: #define CNTL(I) cntl[(I)-1]
26: #define INFOG(I) infog[(I)-1]
27: #define INFO(I) info[(I)-1]
28: #define RINFOG(I) rinfog[(I)-1]
29: #define RINFO(I) rinfo[(I)-1]
31: typedef struct {
32: #if defined(PETSC_USE_COMPLEX)
33: ZMUMPS_STRUC_C id;
34: #else
35: DMUMPS_STRUC_C id;
36: #endif
37: MatStructure matstruc;
38: PetscMPIInt myid,size;
39: PetscInt *irn,*jcn,nz,sym,nSolve;
40: PetscScalar *val;
41: MPI_Comm comm_mumps;
42: VecScatter scat_rhs, scat_sol;
43: PetscBool isAIJ,CleanUpMUMPS;
44: Vec b_seq,x_seq;
45: PetscErrorCode (*Destroy)(Mat);
46: PetscErrorCode (*ConvertToTriples)(Mat, int, MatReuse, int*, int**, int**, PetscScalar**);
47: } Mat_MUMPS;
49: extern PetscErrorCode MatDuplicate_MUMPS(Mat,MatDuplicateOption,Mat*);
52: /* MatConvertToTriples_A_B */
53: /* Convert a PETSc matrix to triples: row[nz], col[nz], val[nz] */
54: /*
55: input:
56: A - matrix in aij, baij, or sbaij (bs=1) format
57: shift - 0: C style output triples; 1: Fortran style output triples
58: reuse - MAT_INITIAL_MATRIX: the triple arrays are allocated and all values are set
59: MAT_REUSE_MATRIX: only the values in the v array are updated
60: output:
61: nnz - dim of r, c, and v (number of local nonzero entries of A)
62: r, c, v - row and column indices and matrix values (the matrix triples)
63: */
67: PetscErrorCode MatConvertToTriples_seqaij_seqaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
68: {
69: const PetscInt *ai,*aj,*ajj,M=A->rmap->n;
70: PetscInt nz,rnz,i,j;
71: PetscErrorCode ierr;
72: PetscInt *row,*col;
73: Mat_SeqAIJ *aa=(Mat_SeqAIJ*)A->data;
76: *v=aa->a;
77: if (reuse == MAT_INITIAL_MATRIX){
78: nz = aa->nz; ai = aa->i; aj = aa->j;
79: *nnz = nz;
80: PetscMalloc(2*nz*sizeof(PetscInt), &row);
81: col = row + nz;
83: nz = 0;
84: for(i=0; i<M; i++) {
85: rnz = ai[i+1] - ai[i];
86: ajj = aj + ai[i];
87: for(j=0; j<rnz; j++) {
88: row[nz] = i+shift; col[nz++] = ajj[j] + shift;
89: }
90: }
91: *r = row; *c = col;
92: }
93: return(0);
94: }
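/*
  A small worked example of the conversion above (not part of the source; the
  2x2 matrix and its CSR arrays are illustrative only). For

      A = [2 0]     CSR: ai = {0,1,3}, aj = {0,0,1}, a = {2.,3.,4.}
          [3 4]

  MatConvertToTriples_seqaij_seqaij() with shift=1 (Fortran-style indices for
  MUMPS) produces nnz=3 and the triples

      r = {1,2,2},  c = {1,1,2},  v = {2.,3.,4.}

  i.e. one (row,col,val) entry per stored nonzero, rows visited in order, with
  v aliasing the matrix's own value array.
*/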
98: PetscErrorCode MatConvertToTriples_seqbaij_seqaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
99: {
100: Mat_SeqBAIJ *aa=(Mat_SeqBAIJ*)A->data;
101: const PetscInt *ai,*aj,*ajj,bs=A->rmap->bs,bs2=aa->bs2,M=A->rmap->N/bs;
102: PetscInt nz,idx=0,rnz,i,j,k,m;
103: PetscErrorCode ierr;
104: PetscInt *row,*col;
107: *v = aa->a;
108: if (reuse == MAT_INITIAL_MATRIX){
109: ai = aa->i; aj = aa->j;
110: nz = bs2*aa->nz;
111: *nnz = nz;
112: PetscMalloc(2*nz*sizeof(PetscInt), &row);
113: col = row + nz;
115: for(i=0; i<M; i++) {
116: ajj = aj + ai[i];
117: rnz = ai[i+1] - ai[i];
118: for(k=0; k<rnz; k++) {
119: for(j=0; j<bs; j++) {
120: for(m=0; m<bs; m++) {
121: row[idx] = i*bs + m + shift;
122: col[idx++] = bs*(ajj[k]) + j + shift;
123: }
124: }
125: }
126: }
127: *r = row; *c = col;
128: }
129: return(0);
130: }
134: PetscErrorCode MatConvertToTriples_seqsbaij_seqsbaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
135: {
136: const PetscInt *ai, *aj,*ajj,M=A->rmap->n;
137: PetscInt nz,rnz,i,j;
138: PetscErrorCode ierr;
139: PetscInt *row,*col;
140: Mat_SeqSBAIJ *aa=(Mat_SeqSBAIJ*)A->data;
143: *v = aa->a;
144: if (reuse == MAT_INITIAL_MATRIX){
145: nz = aa->nz; ai = aa->i; aj = aa->j;
146: *nnz = nz;
147: PetscMalloc(2*nz*sizeof(PetscInt), &row);
148: col = row + nz;
150: nz = 0;
151: for(i=0; i<M; i++) {
152: rnz = ai[i+1] - ai[i];
153: ajj = aj + ai[i];
154: for(j=0; j<rnz; j++) {
155: row[nz] = i+shift; col[nz++] = ajj[j] + shift;
156: }
157: }
158: *r = row; *c = col;
159: }
160: return(0);
161: }
165: PetscErrorCode MatConvertToTriples_seqaij_seqsbaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
166: {
167: const PetscInt *ai,*aj,*ajj,*adiag,M=A->rmap->n;
168: PetscInt nz,rnz,i,j;
169: const PetscScalar *av,*v1;
170: PetscScalar *val;
171: PetscErrorCode ierr;
172: PetscInt *row,*col;
173: Mat_SeqAIJ *aa=(Mat_SeqAIJ*)A->data; /* the input matrix is AIJ; only its upper triangle is extracted below */
176: ai=aa->i; aj=aa->j;av=aa->a;
177: adiag=aa->diag;
178: if (reuse == MAT_INITIAL_MATRIX){
179: nz = M + (aa->nz-M)/2;
180: *nnz = nz;
181: PetscMalloc((2*nz*sizeof(PetscInt)+nz*sizeof(PetscScalar)), &row);
182: col = row + nz;
183: val = (PetscScalar*)(col + nz);
185: nz = 0;
186: for(i=0; i<M; i++) {
187: rnz = ai[i+1] - adiag[i];
188: ajj = aj + adiag[i];
189: v1 = av + adiag[i];
190: for(j=0; j<rnz; j++) {
191: row[nz] = i+shift; col[nz] = ajj[j] + shift; val[nz++] = v1[j];
192: }
193: }
194: *r = row; *c = col; *v = val;
195: } else {
196: nz = 0; val = *v;
197: for(i=0; i <M; i++) {
198: rnz = ai[i+1] - adiag[i];
199: ajj = aj + adiag[i];
200: v1 = av + adiag[i];
201: for(j=0; j<rnz; j++) {
202: val[nz++] = v1[j];
203: }
204: }
205: }
206: return(0);
207: }
211: PetscErrorCode MatConvertToTriples_mpisbaij_mpisbaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
212: {
213: const PetscInt *ai, *aj, *bi, *bj,*garray,m=A->rmap->n,*ajj,*bjj;
214: PetscErrorCode ierr;
215: PetscInt rstart,nz,i,j,jj,irow,countA,countB;
216: PetscInt *row,*col;
217: const PetscScalar *av, *bv,*v1,*v2;
218: PetscScalar *val;
219: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)A->data;
220: Mat_SeqSBAIJ *aa=(Mat_SeqSBAIJ*)(mat->A)->data;
221: Mat_SeqBAIJ *bb=(Mat_SeqBAIJ*)(mat->B)->data;
224: ai=aa->i; aj=aa->j; bi=bb->i; bj=bb->j; rstart= A->rmap->rstart;
225: garray = mat->garray;
226: av=aa->a; bv=bb->a;
228: if (reuse == MAT_INITIAL_MATRIX){
229: nz = aa->nz + bb->nz;
230: *nnz = nz;
231: PetscMalloc((2*nz*sizeof(PetscInt)+nz*sizeof(PetscScalar)), &row);
232: col = row + nz;
233: val = (PetscScalar*)(col + nz);
235: *r = row; *c = col; *v = val;
236: } else {
237: row = *r; col = *c; val = *v;
238: }
240: jj = 0; irow = rstart;
241: for ( i=0; i<m; i++ ) {
242: ajj = aj + ai[i]; /* ptr to the beginning of this row */
243: countA = ai[i+1] - ai[i];
244: countB = bi[i+1] - bi[i];
245: bjj = bj + bi[i];
246: v1 = av + ai[i];
247: v2 = bv + bi[i];
249: /* A-part */
250: for (j=0; j<countA; j++){
251: if (reuse == MAT_INITIAL_MATRIX) {
252: row[jj] = irow + shift; col[jj] = rstart + ajj[j] + shift;
253: }
254: val[jj++] = v1[j];
255: }
257: /* B-part */
258: for(j=0; j < countB; j++){
259: if (reuse == MAT_INITIAL_MATRIX) {
260: row[jj] = irow + shift; col[jj] = garray[bjj[j]] + shift;
261: }
262: val[jj++] = v2[j];
263: }
264: irow++;
265: }
266: return(0);
267: }
271: PetscErrorCode MatConvertToTriples_mpiaij_mpiaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
272: {
273: const PetscInt *ai, *aj, *bi, *bj,*garray,m=A->rmap->n,*ajj,*bjj;
274: PetscErrorCode ierr;
275: PetscInt rstart,nz,i,j,jj,irow,countA,countB;
276: PetscInt *row,*col;
277: const PetscScalar *av, *bv,*v1,*v2;
278: PetscScalar *val;
279: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
280: Mat_SeqAIJ *aa=(Mat_SeqAIJ*)(mat->A)->data;
281: Mat_SeqAIJ *bb=(Mat_SeqAIJ*)(mat->B)->data;
284: ai=aa->i; aj=aa->j; bi=bb->i; bj=bb->j; rstart= A->rmap->rstart;
285: garray = mat->garray;
286: av=aa->a; bv=bb->a;
288: if (reuse == MAT_INITIAL_MATRIX){
289: nz = aa->nz + bb->nz;
290: *nnz = nz;
291: PetscMalloc((2*nz*sizeof(PetscInt)+nz*sizeof(PetscScalar)), &row);
292: col = row + nz;
293: val = (PetscScalar*)(col + nz);
295: *r = row; *c = col; *v = val;
296: } else {
297: row = *r; col = *c; val = *v;
298: }
300: jj = 0; irow = rstart;
301: for ( i=0; i<m; i++ ) {
302: ajj = aj + ai[i]; /* ptr to the beginning of this row */
303: countA = ai[i+1] - ai[i];
304: countB = bi[i+1] - bi[i];
305: bjj = bj + bi[i];
306: v1 = av + ai[i];
307: v2 = bv + bi[i];
309: /* A-part */
310: for (j=0; j<countA; j++){
311: if (reuse == MAT_INITIAL_MATRIX){
312: row[jj] = irow + shift; col[jj] = rstart + ajj[j] + shift;
313: }
314: val[jj++] = v1[j];
315: }
317: /* B-part */
318: for(j=0; j < countB; j++){
319: if (reuse == MAT_INITIAL_MATRIX){
320: row[jj] = irow + shift; col[jj] = garray[bjj[j]] + shift;
321: }
322: val[jj++] = v2[j];
323: }
324: irow++;
325: }
326: return(0);
327: }
331: PetscErrorCode MatConvertToTriples_mpibaij_mpiaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
332: {
333: Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)A->data;
334: Mat_SeqBAIJ *aa=(Mat_SeqBAIJ*)(mat->A)->data;
335: Mat_SeqBAIJ *bb=(Mat_SeqBAIJ*)(mat->B)->data;
336: const PetscInt *ai = aa->i, *bi = bb->i, *aj = aa->j, *bj = bb->j,*ajj, *bjj;
337: const PetscInt *garray = mat->garray,mbs=mat->mbs,rstart=A->rmap->rstart;
338: const PetscInt bs = A->rmap->bs,bs2=mat->bs2;
339: PetscErrorCode ierr;
340: PetscInt nz,i,j,k,n,jj,irow,countA,countB,idx;
341: PetscInt *row,*col;
342: const PetscScalar *av=aa->a, *bv=bb->a,*v1,*v2;
343: PetscScalar *val;
347: if (reuse == MAT_INITIAL_MATRIX) {
348: nz = bs2*(aa->nz + bb->nz);
349: *nnz = nz;
350: PetscMalloc((2*nz*sizeof(PetscInt)+nz*sizeof(PetscScalar)), &row);
351: col = row + nz;
352: val = (PetscScalar*)(col + nz);
354: *r = row; *c = col; *v = val;
355: } else {
356: row = *r; col = *c; val = *v;
357: }
359: jj = 0; irow = rstart;
360: for ( i=0; i<mbs; i++ ) {
361: countA = ai[i+1] - ai[i];
362: countB = bi[i+1] - bi[i];
363: ajj = aj + ai[i];
364: bjj = bj + bi[i];
365: v1 = av + bs2*ai[i];
366: v2 = bv + bs2*bi[i];
368: idx = 0;
369: /* A-part */
370: for (k=0; k<countA; k++){
371: for (j=0; j<bs; j++) {
372: for (n=0; n<bs; n++) {
373: if (reuse == MAT_INITIAL_MATRIX){
374: row[jj] = irow + n + shift;
375: col[jj] = rstart + bs*ajj[k] + j + shift;
376: }
377: val[jj++] = v1[idx++];
378: }
379: }
380: }
382: idx = 0;
383: /* B-part */
384: for(k=0; k<countB; k++){
385: for (j=0; j<bs; j++) {
386: for (n=0; n<bs; n++) {
387: if (reuse == MAT_INITIAL_MATRIX){
388: row[jj] = irow + n + shift;
389: col[jj] = bs*garray[bjj[k]] + j + shift;
390: }
391: val[jj++] = v2[idx++];
392: }
393: }
394: }
395: irow += bs;
396: }
397: return(0);
398: }
402: PetscErrorCode MatConvertToTriples_mpiaij_mpisbaij(Mat A,int shift,MatReuse reuse,int *nnz,int **r, int **c, PetscScalar **v)
403: {
404: const PetscInt *ai, *aj,*adiag, *bi, *bj,*garray,m=A->rmap->n,*ajj,*bjj;
405: PetscErrorCode ierr;
406: PetscInt rstart,nz,nza,nzb,i,j,jj,irow,countA,countB;
407: PetscInt *row,*col;
408: const PetscScalar *av, *bv,*v1,*v2;
409: PetscScalar *val;
410: Mat_MPIAIJ *mat = (Mat_MPIAIJ*)A->data;
411: Mat_SeqAIJ *aa=(Mat_SeqAIJ*)(mat->A)->data;
412: Mat_SeqAIJ *bb=(Mat_SeqAIJ*)(mat->B)->data;
415: ai=aa->i; aj=aa->j; adiag=aa->diag;
416: bi=bb->i; bj=bb->j; garray = mat->garray;
417: av=aa->a; bv=bb->a;
418: rstart = A->rmap->rstart;
420: if (reuse == MAT_INITIAL_MATRIX) {
421: nza = 0; /* num of upper triangular entries in mat->A, including diagonals */
422: nzb = 0; /* num of upper triangular entries in mat->B */
423: for(i=0; i<m; i++){
424: nza += (ai[i+1] - adiag[i]);
425: countB = bi[i+1] - bi[i];
426: bjj = bj + bi[i];
427: for (j=0; j<countB; j++){
428: if (garray[bjj[j]] > rstart) nzb++;
429: }
430: }
431:
432: nz = nza + nzb; /* total nz of upper triangular part of mat */
433: *nnz = nz;
434: PetscMalloc((2*nz*sizeof(PetscInt)+nz*sizeof(PetscScalar)), &row);
435: col = row + nz;
436: val = (PetscScalar*)(col + nz);
438: *r = row; *c = col; *v = val;
439: } else {
440: row = *r; col = *c; val = *v;
441: }
443: jj = 0; irow = rstart;
444: for ( i=0; i<m; i++ ) {
445: ajj = aj + adiag[i]; /* ptr to the beginning of the diagonal of this row */
446: v1 = av + adiag[i];
447: countA = ai[i+1] - adiag[i];
448: countB = bi[i+1] - bi[i];
449: bjj = bj + bi[i];
450: v2 = bv + bi[i];
452: /* A-part */
453: for (j=0; j<countA; j++){
454: if (reuse == MAT_INITIAL_MATRIX) {
455: row[jj] = irow + shift; col[jj] = rstart + ajj[j] + shift;
456: }
457: val[jj++] = v1[j];
458: }
460: /* B-part */
461: for(j=0; j < countB; j++){
462: if (garray[bjj[j]] > rstart) {
463: if (reuse == MAT_INITIAL_MATRIX) {
464: row[jj] = irow + shift; col[jj] = garray[bjj[j]] + shift;
465: }
466: val[jj++] = v2[j];
467: }
468: }
469: irow++;
470: }
471: return(0);
472: }
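/*
  Illustration of the upper-triangular extraction above. On a process owning
  rows [rstart, rstart+m), the diagonal block mat->A stores the columns inside
  that range and mat->B stores the rest, with garray[] mapping B's compressed
  column indices back to global columns. Since every column of B lies outside
  [rstart, rstart+m), the test garray[bjj[j]] > rstart keeps exactly the B
  entries whose global column is to the right of the owned block, i.e. the
  strictly upper off-block part; together with the A entries taken from
  adiag[i] onward this yields the upper triangle that MUMPS expects for
  sym=1 or sym=2.
*/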
476: PetscErrorCode MatDestroy_MUMPS(Mat A)
477: {
478: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
482: if (lu && lu->CleanUpMUMPS) {
483: /* Terminate the MUMPS instance and deallocate its memory */
484: PetscFree2(lu->id.sol_loc,lu->id.isol_loc);
485: VecScatterDestroy(&lu->scat_rhs);
486: VecDestroy(&lu->b_seq);
487: VecScatterDestroy(&lu->scat_sol);
488: VecDestroy(&lu->x_seq);
489: PetscFree(lu->id.perm_in);
490: PetscFree(lu->irn);
491: lu->id.job=JOB_END;
492: #if defined(PETSC_USE_COMPLEX)
493: zmumps_c(&lu->id);
494: #else
495: dmumps_c(&lu->id);
496: #endif
497: MPI_Comm_free(&(lu->comm_mumps));
498: }
499: if (lu && lu->Destroy) {
500: (lu->Destroy)(A);
501: }
502: PetscFree(A->spptr);
504: /* clear composed functions */
505: PetscObjectComposeFunctionDynamic((PetscObject)A,"MatFactorGetSolverPackage_C","",PETSC_NULL);
506: PetscObjectComposeFunctionDynamic((PetscObject)A,"MatMumpsSetIcntl_C","",PETSC_NULL);
507: return(0);
508: }
512: PetscErrorCode MatSolve_MUMPS(Mat A,Vec b,Vec x)
513: {
514: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
515: PetscScalar *array;
516: Vec b_seq;
517: IS is_iden,is_petsc;
519: PetscInt i;
522: lu->id.nrhs = 1;
523: b_seq = lu->b_seq;
524: if (lu->size > 1){
525: /* MUMPS only supports a centralized rhs. Scatter b into a sequential rhs vector */
526: VecScatterBegin(lu->scat_rhs,b,b_seq,INSERT_VALUES,SCATTER_FORWARD);
527: VecScatterEnd(lu->scat_rhs,b,b_seq,INSERT_VALUES,SCATTER_FORWARD);
528: if (!lu->myid) {VecGetArray(b_seq,&array);}
529: } else { /* size == 1 */
530: VecCopy(b,x);
531: VecGetArray(x,&array);
532: }
533: if (!lu->myid) { /* define rhs on the host */
534: lu->id.nrhs = 1;
535: #if defined(PETSC_USE_COMPLEX)
536: lu->id.rhs = (mumps_double_complex*)array;
537: #else
538: lu->id.rhs = array;
539: #endif
540: }
542: /* solve phase */
543: /*-------------*/
544: lu->id.job = JOB_SOLVE;
545: #if defined(PETSC_USE_COMPLEX)
546: zmumps_c(&lu->id);
547: #else
548: dmumps_c(&lu->id);
549: #endif
550: if (lu->id.INFOG(1) < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error reported by MUMPS in solve phase: INFOG(1)=%d\n",lu->id.INFOG(1));
552: if (lu->size > 1) { /* convert mumps distributed solution to petsc mpi x */
553: if (!lu->nSolve){ /* create scatter scat_sol */
554: ISCreateStride(PETSC_COMM_SELF,lu->id.lsol_loc,0,1,&is_iden); /* from */
555: for (i=0; i<lu->id.lsol_loc; i++){
556: lu->id.isol_loc[i] -= 1; /* change Fortran style to C style */
557: }
558: ISCreateGeneral(PETSC_COMM_SELF,lu->id.lsol_loc,lu->id.isol_loc,PETSC_COPY_VALUES,&is_petsc); /* to */
559: VecScatterCreate(lu->x_seq,is_iden,x,is_petsc,&lu->scat_sol);
560: ISDestroy(&is_iden);
561: ISDestroy(&is_petsc);
562: }
563: VecScatterBegin(lu->scat_sol,lu->x_seq,x,INSERT_VALUES,SCATTER_FORWARD);
564: VecScatterEnd(lu->scat_sol,lu->x_seq,x,INSERT_VALUES,SCATTER_FORWARD);
565: }
566: lu->nSolve++;
567: return(0);
568: }
572: PetscErrorCode MatSolveTranspose_MUMPS(Mat A,Vec b,Vec x)
573: {
574: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
578: lu->id.ICNTL(9) = 0;
579: MatSolve_MUMPS(A,b,x);
580: lu->id.ICNTL(9) = 1;
581: return(0);
582: }
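/*
  MUMPS selects between A x = b and A^T x = b via ICNTL(9) (1 solves with A,
  any other value with A^T), so the transpose solve simply flips that flag
  around MatSolve_MUMPS(). The same control is available at runtime through
  -mat_mumps_icntl_9.
*/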
586: PetscErrorCode MatMatSolve_MUMPS(Mat A,Mat B,Mat X)
587: {
589: PetscBool flg;
592: PetscTypeCompareAny((PetscObject)B,&flg,MATSEQDENSE,MATMPIDENSE,PETSC_NULL);
593: if (!flg) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Matrix B must be MATDENSE matrix");
594: PetscTypeCompareAny((PetscObject)X,&flg,MATSEQDENSE,MATMPIDENSE,PETSC_NULL);
595: if (!flg) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Matrix X must be MATDENSE matrix");
596: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatMatSolve_MUMPS() is not implemented yet");
597: return(0);
598: }
599: #if !defined(PETSC_USE_COMPLEX)
600: /*
601: input:
602: F: numeric factor
603: output:
604: nneg: total number of negative pivots
605: nzero: always set to 0 (zero pivots are not counted by this routine)
606: npos: (global dimension of F) - nneg
607: */
611: PetscErrorCode MatGetInertia_SBAIJMUMPS(Mat F,int *nneg,int *nzero,int *npos)
612: {
613: Mat_MUMPS *lu =(Mat_MUMPS*)F->spptr;
615: PetscMPIInt size;
618: MPI_Comm_size(((PetscObject)F)->comm,&size);
619: /* MUMPS 4.3.1 calls ScaLAPACK when ICNTL(13)=0 (default), which does not offer the possibility to compute the inertia of a dense matrix. Set ICNTL(13)=1 to skip ScaLAPACK */
620: if (size > 1 && lu->id.ICNTL(13) != 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"ICNTL(13)=%d. -mat_mumps_icntl_13 must be set to 1 for correct global matrix inertia\n",lu->id.ICNTL(13));
621: if (nneg){
622: if (!lu->myid){
623: *nneg = lu->id.INFOG(12);
624: }
625: MPI_Bcast(nneg,1,MPI_INT,0,lu->comm_mumps);
626: }
627: if (nzero) *nzero = 0;
628: if (npos) *npos = F->rmap->N - (*nneg);
629: return(0);
630: }
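/*
  Hedged usage sketch (perm, info and a symmetric matrix A are assumed to be
  set up by the caller): factoring A - sigma*I and reading the inertia gives
  the number of eigenvalues below the shift sigma, the classical Sturm
  sequence test.

    Mat      F;
    PetscInt nneg,nzero,npos;
    MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_CHOLESKY,&F);
    MatCholeskyFactorSymbolic(F,A,perm,&info);
    MatCholeskyFactorNumeric(F,A,&info);
    MatGetInertia(F,&nneg,&nzero,&npos);   nneg = number of negative pivots
*/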
631: #endif /* !defined(PETSC_USE_COMPLEX) */
635: PetscErrorCode MatFactorNumeric_MUMPS(Mat F,Mat A,const MatFactorInfo *info)
636: {
637: Mat_MUMPS *lu =(Mat_MUMPS*)(F)->spptr;
638: PetscErrorCode ierr;
639: MatReuse reuse;
640: Mat F_diag;
641: PetscBool isMPIAIJ;
644: reuse = MAT_REUSE_MATRIX;
645: (*lu->ConvertToTriples)(A, 1, reuse, &lu->nz, &lu->irn, &lu->jcn, &lu->val);
647: /* numerical factorization phase */
648: /*-------------------------------*/
649: lu->id.job = JOB_FACTNUMERIC;
650: if(!lu->id.ICNTL(18)) {
651: if (!lu->myid) {
652: #if defined(PETSC_USE_COMPLEX)
653: lu->id.a = (mumps_double_complex*)lu->val;
654: #else
655: lu->id.a = lu->val;
656: #endif
657: }
658: } else {
659: #if defined(PETSC_USE_COMPLEX)
660: lu->id.a_loc = (mumps_double_complex*)lu->val;
661: #else
662: lu->id.a_loc = lu->val;
663: #endif
664: }
665: #if defined(PETSC_USE_COMPLEX)
666: zmumps_c(&lu->id);
667: #else
668: dmumps_c(&lu->id);
669: #endif
670: if (lu->id.INFOG(1) < 0) {
671: if (lu->id.INFO(1) == -13) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error reported by MUMPS in numerical factorization phase: Cannot allocate required memory %d megabytes\n",lu->id.INFO(2));
672: else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error reported by MUMPS in numerical factorization phase: INFO(1)=%d, INFO(2)=%d\n",lu->id.INFO(1),lu->id.INFO(2));
673: }
674: if (!lu->myid && lu->id.ICNTL(16) > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ICNTL(16) = %d\n",lu->id.ICNTL(16));
676: if (lu->size > 1){
677: PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPIAIJ);
678: if(isMPIAIJ) {
679: F_diag = ((Mat_MPIAIJ *)(F)->data)->A;
680: } else {
681: F_diag = ((Mat_MPISBAIJ *)(F)->data)->A;
682: }
683: F_diag->assembled = PETSC_TRUE;
684: if (lu->nSolve){
685: VecScatterDestroy(&lu->scat_sol);
686: PetscFree2(lu->id.sol_loc,lu->id.isol_loc);
687: VecDestroy(&lu->x_seq);
688: }
689: }
690: (F)->assembled = PETSC_TRUE;
691: lu->matstruc = SAME_NONZERO_PATTERN;
692: lu->CleanUpMUMPS = PETSC_TRUE;
693: lu->nSolve = 0;
694:
695: if (lu->size > 1){
696: /* distributed solution */
697: lu->id.ICNTL(21) = 1;
698: if (!lu->nSolve){
699: /* Create x_seq=sol_loc for repeated use */
700: PetscInt lsol_loc;
701: PetscScalar *sol_loc;
702: lsol_loc = lu->id.INFO(23); /* length of sol_loc */
703: PetscMalloc2(lsol_loc,PetscScalar,&sol_loc,lsol_loc,PetscInt,&lu->id.isol_loc);
704: lu->id.lsol_loc = lsol_loc;
705: #if defined(PETSC_USE_COMPLEX)
706: lu->id.sol_loc = (mumps_double_complex*)sol_loc;
707: #else
708: lu->id.sol_loc = sol_loc;
709: #endif
710: VecCreateSeqWithArray(PETSC_COMM_SELF,lsol_loc,sol_loc,&lu->x_seq);
711: }
712: }
713: return(0);
714: }
718: PetscErrorCode PetscSetMUMPSOptions(Mat F, Mat A)
719: {
720: Mat_MUMPS *lu = (Mat_MUMPS*)F->spptr;
721: PetscErrorCode ierr;
722: PetscInt icntl;
723: PetscBool flg;
726: PetscOptionsBegin(((PetscObject)A)->comm,((PetscObject)A)->prefix,"MUMPS Options","Mat");
727: if (lu->size == 1){
728: lu->id.ICNTL(18) = 0; /* centralized assembled matrix input */
729: } else {
730: lu->id.ICNTL(18) = 3; /* distributed assembled matrix input */
731: }
733: icntl=-1;
734: lu->id.ICNTL(4) = 0; /* level of printing; overrides the MUMPS default ICNTL(4)=2 */
735: PetscOptionsInt("-mat_mumps_icntl_4","ICNTL(4): level of printing (0 to 4)","None",lu->id.ICNTL(4),&icntl,&flg);
736: if ((flg && icntl > 0) || PetscLogPrintInfo) {
737: lu->id.ICNTL(4)=icntl; /* and use mumps default icntl(i), i=1,2,3 */
738: } else { /* no output */
739: lu->id.ICNTL(1) = 0; /* error message, default= 6 */
740: lu->id.ICNTL(2) = 0; /* output stream for diagnostic printing, statistics, and warnings; default=0 */
741: lu->id.ICNTL(3) = 0; /* output stream for global information, default=6 */
742: }
743: PetscOptionsInt("-mat_mumps_icntl_6","ICNTL(6): column permutation and/or scaling to get a zero-free diagonal (0 to 7)","None",lu->id.ICNTL(6),&lu->id.ICNTL(6),PETSC_NULL);
744: icntl=-1;
745: PetscOptionsInt("-mat_mumps_icntl_7","ICNTL(7): sequential matrix ordering (0 to 7) 3 = Scotch, 5 = Metis","None",lu->id.ICNTL(7),&icntl,&flg);
746: if (flg) {
747: if (icntl== 1 && lu->size > 1){
748: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"A user-provided pivot order (PERM_IN) is not supported in parallel by the PETSc/MUMPS interface\n");
749: } else {
750: lu->id.ICNTL(7) = icntl;
751: }
752: }
753:
754: PetscOptionsInt("-mat_mumps_icntl_8","ICNTL(8): scaling strategy (-2 to 7 or 77)","None",lu->id.ICNTL(8),&lu->id.ICNTL(8),PETSC_NULL);
755: PetscOptionsInt("-mat_mumps_icntl_10","ICNTL(10): max num of refinements","None",lu->id.ICNTL(10),&lu->id.ICNTL(10),PETSC_NULL);
756: PetscOptionsInt("-mat_mumps_icntl_11","ICNTL(11): statistics related to the linear system solved (via -ksp_view)","None",lu->id.ICNTL(11),&lu->id.ICNTL(11),PETSC_NULL);
757: PetscOptionsInt("-mat_mumps_icntl_12","ICNTL(12): efficiency control: defines the ordering strategy with scaling constraints (0 to 3","None",lu->id.ICNTL(12),&lu->id.ICNTL(12),PETSC_NULL);
758: PetscOptionsInt("-mat_mumps_icntl_13","ICNTL(13): efficiency control: with or without ScaLAPACK","None",lu->id.ICNTL(13),&lu->id.ICNTL(13),PETSC_NULL);
759: PetscOptionsInt("-mat_mumps_icntl_14","ICNTL(14): percentage of estimated workspace increase","None",lu->id.ICNTL(14),&lu->id.ICNTL(14),PETSC_NULL);
760: PetscOptionsInt("-mat_mumps_icntl_19","ICNTL(19): Schur complement","None",lu->id.ICNTL(19),&lu->id.ICNTL(19),PETSC_NULL);
762: PetscOptionsInt("-mat_mumps_icntl_22","ICNTL(22): in-core/out-of-core facility (0 or 1)","None",lu->id.ICNTL(22),&lu->id.ICNTL(22),PETSC_NULL);
763: PetscOptionsInt("-mat_mumps_icntl_23","ICNTL(23): max size of the working memory (MB) that can allocate per processor","None",lu->id.ICNTL(23),&lu->id.ICNTL(23),PETSC_NULL);
764: PetscOptionsInt("-mat_mumps_icntl_24","ICNTL(24): detection of null pivot rows (0 or 1)","None",lu->id.ICNTL(24),&lu->id.ICNTL(24),PETSC_NULL);
765: if (lu->id.ICNTL(24)){
766: lu->id.ICNTL(13) = 1; /* turn off ScaLAPACK to help with the correct detection of null pivots */
767: }
769: PetscOptionsInt("-mat_mumps_icntl_25","ICNTL(25): computation of a null space basis","None",lu->id.ICNTL(25),&lu->id.ICNTL(25),PETSC_NULL);
770: PetscOptionsInt("-mat_mumps_icntl_26","ICNTL(26): Schur options for right-hand side or solution vector","None",lu->id.ICNTL(26),&lu->id.ICNTL(26),PETSC_NULL);
771: PetscOptionsInt("-mat_mumps_icntl_27","ICNTL(27): experimental parameter","None",lu->id.ICNTL(27),&lu->id.ICNTL(27),PETSC_NULL);
772: PetscOptionsInt("-mat_mumps_icntl_28","ICNTL(28): use 1 for sequential analysis and ictnl(7) ordering, or 2 for parallel analysis and ictnl(29) ordering","None",lu->id.ICNTL(28),&lu->id.ICNTL(28),PETSC_NULL);
773: PetscOptionsInt("-mat_mumps_icntl_29","ICNTL(29): parallel ordering 1 = ptscotch 2 = parmetis","None",lu->id.ICNTL(29),&lu->id.ICNTL(29),PETSC_NULL);
774: PetscOptionsInt("-mat_mumps_icntl_30","ICNTL(30): compute user-specified set of entries in inv(A)","None",lu->id.ICNTL(30),&lu->id.ICNTL(30),PETSC_NULL);
775: PetscOptionsInt("-mat_mumps_icntl_31","ICNTL(31): factors can be discarded in the solve phase","None",lu->id.ICNTL(31),&lu->id.ICNTL(31),PETSC_NULL);
776: PetscOptionsInt("-mat_mumps_icntl_33","ICNTL(33): compute determinant","None",lu->id.ICNTL(33),&lu->id.ICNTL(33),PETSC_NULL);
778: PetscOptionsReal("-mat_mumps_cntl_1","CNTL(1): relative pivoting threshold","None",lu->id.CNTL(1),&lu->id.CNTL(1),PETSC_NULL);
779: PetscOptionsReal("-mat_mumps_cntl_2","CNTL(2): stopping criterion of refinement","None",lu->id.CNTL(2),&lu->id.CNTL(2),PETSC_NULL);
780: PetscOptionsReal("-mat_mumps_cntl_3","CNTL(3): absolute pivoting threshold","None",lu->id.CNTL(3),&lu->id.CNTL(3),PETSC_NULL);
781: PetscOptionsReal("-mat_mumps_cntl_4","CNTL(4): value for static pivoting","None",lu->id.CNTL(4),&lu->id.CNTL(4),PETSC_NULL);
782: PetscOptionsReal("-mat_mumps_cntl_5","CNTL(5): fixation for null pivots","None",lu->id.CNTL(5),&lu->id.CNTL(5),PETSC_NULL);
783: PetscOptionsEnd();
784: return(0);
785: }
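/*
  Example of the runtime options registered above (values are illustrative):

    -mat_mumps_icntl_4 2       verbose MUMPS diagnostics
    -mat_mumps_icntl_7 5       sequential ordering: Metis
    -mat_mumps_icntl_14 30     increase the workspace estimate by 30%
    -mat_mumps_cntl_1 0.01     relative pivoting threshold

  Each flag maps one-to-one onto the MUMPS ICNTL()/CNTL() control arrays
  described in the MUMPS Users' Guide.
*/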
786:
789: PetscErrorCode PetscInitializeMUMPS(Mat A,Mat_MUMPS* mumps)
790: {
791: PetscErrorCode ierr;
794: MPI_Comm_rank(((PetscObject)A)->comm, &mumps->myid);
795: MPI_Comm_size(((PetscObject)A)->comm,&mumps->size);
796: MPI_Comm_dup(((PetscObject)A)->comm,&(mumps->comm_mumps));
797: mumps->id.comm_fortran = MPI_Comm_c2f(mumps->comm_mumps);
799: mumps->id.job = JOB_INIT;
800: mumps->id.par = 1; /* host participates in factorization and solve */
801: mumps->id.sym = mumps->sym;
802: #if defined(PETSC_USE_COMPLEX)
803: zmumps_c(&mumps->id);
804: #else
805: dmumps_c(&mumps->id);
806: #endif
808: mumps->CleanUpMUMPS = PETSC_FALSE;
809: mumps->scat_rhs = PETSC_NULL;
810: mumps->scat_sol = PETSC_NULL;
811: mumps->nSolve = 0;
812: return(0);
813: }
814:
815: /* Note the PETSc r and c permutations are ignored unless ICNTL(7)=1 selects the user-provided ordering */
818: PetscErrorCode MatLUFactorSymbolic_AIJMUMPS(Mat F,Mat A,IS r,IS c,const MatFactorInfo *info)
819: {
820: Mat_MUMPS *lu = (Mat_MUMPS*)F->spptr;
821: PetscErrorCode ierr;
822: MatReuse reuse;
823: Vec b;
824: IS is_iden;
825: const PetscInt M = A->rmap->N;
828: lu->matstruc = DIFFERENT_NONZERO_PATTERN;
830: /* Set MUMPS options */
831: PetscSetMUMPSOptions(F,A);
832:
833: reuse = MAT_INITIAL_MATRIX;
834: (*lu->ConvertToTriples)(A, 1, reuse, &lu->nz, &lu->irn, &lu->jcn, &lu->val);
836: /* analysis phase */
837: /*----------------*/
838: lu->id.job = JOB_FACTSYMBOLIC;
839: lu->id.n = M;
840: switch (lu->id.ICNTL(18)){
841: case 0: /* centralized assembled matrix input */
842: if (!lu->myid) {
843: lu->id.nz =lu->nz; lu->id.irn=lu->irn; lu->id.jcn=lu->jcn;
844: if (lu->id.ICNTL(6)>1){
845: #if defined(PETSC_USE_COMPLEX)
846: lu->id.a = (mumps_double_complex*)lu->val;
847: #else
848: lu->id.a = lu->val;
849: #endif
850: }
851: if (lu->id.ICNTL(7) == 1){ /* use the user-provided matrix ordering */
852: if (!lu->myid) {
853: const PetscInt *idx;
854: PetscInt i,*perm_in;
855: PetscMalloc(M*sizeof(PetscInt),&perm_in);
856: ISGetIndices(r,&idx);
857: lu->id.perm_in = perm_in;
858: for (i=0; i<M; i++) perm_in[i] = idx[i]+1; /* perm_in[]: start from 1, not 0! */
859: ISRestoreIndices(r,&idx);
860: }
861: }
862: }
863: break;
864: case 3: /* distributed assembled matrix input (size>1) */
865: lu->id.nz_loc = lu->nz;
866: lu->id.irn_loc=lu->irn; lu->id.jcn_loc=lu->jcn;
867: if (lu->id.ICNTL(6)>1) {
868: #if defined(PETSC_USE_COMPLEX)
869: lu->id.a_loc = (mumps_double_complex*)lu->val;
870: #else
871: lu->id.a_loc = lu->val;
872: #endif
873: }
874: /* MUMPS only supports centralized rhs. Create scatter scat_rhs for repeated use in MatSolve() */
875: if (!lu->myid){
876: VecCreateSeq(PETSC_COMM_SELF,A->cmap->N,&lu->b_seq);
877: ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,&is_iden);
878: } else {
879: VecCreateSeq(PETSC_COMM_SELF,0,&lu->b_seq);
880: ISCreateStride(PETSC_COMM_SELF,0,0,1,&is_iden);
881: }
882: VecCreate(((PetscObject)A)->comm,&b);
883: VecSetSizes(b,A->rmap->n,PETSC_DECIDE);
884: VecSetFromOptions(b);
886: VecScatterCreate(b,is_iden,lu->b_seq,is_iden,&lu->scat_rhs);
887: ISDestroy(&is_iden);
888: VecDestroy(&b);
889: break;
890: }
891: #if defined(PETSC_USE_COMPLEX)
892: zmumps_c(&lu->id);
893: #else
894: dmumps_c(&lu->id);
895: #endif
896: if (lu->id.INFOG(1) < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error reported by MUMPS in analysis phase: INFOG(1)=%d\n",lu->id.INFOG(1));
897:
898: F->ops->lufactornumeric = MatFactorNumeric_MUMPS;
899: F->ops->solve = MatSolve_MUMPS;
900: F->ops->solvetranspose = MatSolveTranspose_MUMPS;
901: F->ops->matsolve = MatMatSolve_MUMPS;
902: return(0);
903: }
905: /* Note the PETSc r and c permutations are ignored */
908: PetscErrorCode MatLUFactorSymbolic_BAIJMUMPS(Mat F,Mat A,IS r,IS c,const MatFactorInfo *info)
909: {
911: Mat_MUMPS *lu = (Mat_MUMPS*)F->spptr;
912: PetscErrorCode ierr;
913: MatReuse reuse;
914: Vec b;
915: IS is_iden;
916: const PetscInt M = A->rmap->N;
919: lu->matstruc = DIFFERENT_NONZERO_PATTERN;
921: /* Set MUMPS options */
922: PetscSetMUMPSOptions(F,A);
924: reuse = MAT_INITIAL_MATRIX;
925: (*lu->ConvertToTriples)(A, 1, reuse, &lu->nz, &lu->irn, &lu->jcn, &lu->val);
927: /* analysis phase */
928: /*----------------*/
929: lu->id.job = JOB_FACTSYMBOLIC;
930: lu->id.n = M;
931: switch (lu->id.ICNTL(18)){
932: case 0: /* centralized assembled matrix input */
933: if (!lu->myid) {
934: lu->id.nz =lu->nz; lu->id.irn=lu->irn; lu->id.jcn=lu->jcn;
935: if (lu->id.ICNTL(6)>1){
936: #if defined(PETSC_USE_COMPLEX)
937: lu->id.a = (mumps_double_complex*)lu->val;
938: #else
939: lu->id.a = lu->val;
940: #endif
941: }
942: }
943: break;
944: case 3: /* distributed assembled matrix input (size>1) */
945: lu->id.nz_loc = lu->nz;
946: lu->id.irn_loc=lu->irn; lu->id.jcn_loc=lu->jcn;
947: if (lu->id.ICNTL(6)>1) {
948: #if defined(PETSC_USE_COMPLEX)
949: lu->id.a_loc = (mumps_double_complex*)lu->val;
950: #else
951: lu->id.a_loc = lu->val;
952: #endif
953: }
954: /* MUMPS only supports centralized rhs. Create scatter scat_rhs for repeated use in MatSolve() */
955: if (!lu->myid){
956: VecCreateSeq(PETSC_COMM_SELF,A->cmap->N,&lu->b_seq);
957: ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,&is_iden);
958: } else {
959: VecCreateSeq(PETSC_COMM_SELF,0,&lu->b_seq);
960: ISCreateStride(PETSC_COMM_SELF,0,0,1,&is_iden);
961: }
962: VecCreate(((PetscObject)A)->comm,&b);
963: VecSetSizes(b,A->rmap->n,PETSC_DECIDE);
964: VecSetFromOptions(b);
966: VecScatterCreate(b,is_iden,lu->b_seq,is_iden,&lu->scat_rhs);
967: ISDestroy(&is_iden);
968: VecDestroy(&b);
969: break;
970: }
971: #if defined(PETSC_USE_COMPLEX)
972: zmumps_c(&lu->id);
973: #else
974: dmumps_c(&lu->id);
975: #endif
976: if (lu->id.INFOG(1) < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error reported by MUMPS in analysis phase: INFOG(1)=%d\n",lu->id.INFOG(1));
977:
978: F->ops->lufactornumeric = MatFactorNumeric_MUMPS;
979: F->ops->solve = MatSolve_MUMPS;
980: F->ops->solvetranspose = MatSolveTranspose_MUMPS;
981: return(0);
982: }
984: /* Note the PETSc r permutation and factor info are ignored */
987: PetscErrorCode MatCholeskyFactorSymbolic_MUMPS(Mat F,Mat A,IS r,const MatFactorInfo *info)
988: {
989: Mat_MUMPS *lu = (Mat_MUMPS*)F->spptr;
990: PetscErrorCode ierr;
991: MatReuse reuse;
992: Vec b;
993: IS is_iden;
994: const PetscInt M = A->rmap->N;
997: lu->matstruc = DIFFERENT_NONZERO_PATTERN;
999: /* Set MUMPS options */
1000: PetscSetMUMPSOptions(F,A);
1002: reuse = MAT_INITIAL_MATRIX;
1003: (*lu->ConvertToTriples)(A, 1 , reuse, &lu->nz, &lu->irn, &lu->jcn, &lu->val);
1005: /* analysis phase */
1006: /*----------------*/
1007: lu->id.job = JOB_FACTSYMBOLIC;
1008: lu->id.n = M;
1009: switch (lu->id.ICNTL(18)){
1010: case 0: /* centralized assembled matrix input */
1011: if (!lu->myid) {
1012: lu->id.nz =lu->nz; lu->id.irn=lu->irn; lu->id.jcn=lu->jcn;
1013: if (lu->id.ICNTL(6)>1){
1014: #if defined(PETSC_USE_COMPLEX)
1015: lu->id.a = (mumps_double_complex*)lu->val;
1016: #else
1017: lu->id.a = lu->val;
1018: #endif
1019: }
1020: }
1021: break;
1022: case 3: /* distributed assembled matrix input (size>1) */
1023: lu->id.nz_loc = lu->nz;
1024: lu->id.irn_loc=lu->irn; lu->id.jcn_loc=lu->jcn;
1025: if (lu->id.ICNTL(6)>1) {
1026: #if defined(PETSC_USE_COMPLEX)
1027: lu->id.a_loc = (mumps_double_complex*)lu->val;
1028: #else
1029: lu->id.a_loc = lu->val;
1030: #endif
1031: }
1032: /* MUMPS only supports centralized rhs. Create scatter scat_rhs for repeated use in MatSolve() */
1033: if (!lu->myid){
1034: VecCreateSeq(PETSC_COMM_SELF,A->cmap->N,&lu->b_seq);
1035: ISCreateStride(PETSC_COMM_SELF,A->cmap->N,0,1,&is_iden);
1036: } else {
1037: VecCreateSeq(PETSC_COMM_SELF,0,&lu->b_seq);
1038: ISCreateStride(PETSC_COMM_SELF,0,0,1,&is_iden);
1039: }
1040: VecCreate(((PetscObject)A)->comm,&b);
1041: VecSetSizes(b,A->rmap->n,PETSC_DECIDE);
1042: VecSetFromOptions(b);
1044: VecScatterCreate(b,is_iden,lu->b_seq,is_iden,&lu->scat_rhs);
1045: ISDestroy(&is_iden);
1046: VecDestroy(&b);
1047: break;
1048: }
1049: #if defined(PETSC_USE_COMPLEX)
1050: zmumps_c(&lu->id);
1051: #else
1052: dmumps_c(&lu->id);
1053: #endif
1054: if (lu->id.INFOG(1) < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error reported by MUMPS in analysis phase: INFOG(1)=%d\n",lu->id.INFOG(1));
1056: F->ops->choleskyfactornumeric = MatFactorNumeric_MUMPS;
1057: F->ops->solve = MatSolve_MUMPS;
1058: F->ops->solvetranspose = MatSolve_MUMPS;
1059: #if !defined(PETSC_USE_COMPLEX)
1060: F->ops->getinertia = MatGetInertia_SBAIJMUMPS;
1061: #else
1062: F->ops->getinertia = PETSC_NULL;
1063: #endif
1064: return(0);
1065: }
1069: PetscErrorCode MatView_MUMPS(Mat A,PetscViewer viewer)
1070: {
1071: PetscErrorCode ierr;
1072: PetscBool iascii;
1073: PetscViewerFormat format;
1074: Mat_MUMPS *lu=(Mat_MUMPS*)A->spptr;
1077: /* check if matrix is mumps type */
1078: if (A->ops->solve != MatSolve_MUMPS) return(0);
1080: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1081: if (iascii) {
1082: PetscViewerGetFormat(viewer,&format);
1083: if (format == PETSC_VIEWER_ASCII_INFO){
1084: PetscViewerASCIIPrintf(viewer,"MUMPS run parameters:\n");
1085: PetscViewerASCIIPrintf(viewer," SYM (matrix type): %d \n",lu->id.sym);
1086: PetscViewerASCIIPrintf(viewer," PAR (host participation): %d \n",lu->id.par);
1087: PetscViewerASCIIPrintf(viewer," ICNTL(1) (output for error): %d \n",lu->id.ICNTL(1));
1088: PetscViewerASCIIPrintf(viewer," ICNTL(2) (output of diagnostic msg): %d \n",lu->id.ICNTL(2));
1089: PetscViewerASCIIPrintf(viewer," ICNTL(3) (output for global info): %d \n",lu->id.ICNTL(3));
1090: PetscViewerASCIIPrintf(viewer," ICNTL(4) (level of printing): %d \n",lu->id.ICNTL(4));
1091: PetscViewerASCIIPrintf(viewer," ICNTL(5) (input mat struct): %d \n",lu->id.ICNTL(5));
1092: PetscViewerASCIIPrintf(viewer," ICNTL(6) (matrix prescaling): %d \n",lu->id.ICNTL(6));
1093: PetscViewerASCIIPrintf(viewer," ICNTL(7) (sequential matrix ordering): %d \n",lu->id.ICNTL(7));
1094: PetscViewerASCIIPrintf(viewer," ICNTL(8) (scaling strategy): %d \n",lu->id.ICNTL(8));
1095: PetscViewerASCIIPrintf(viewer," ICNTL(10) (max num of refinements): %d \n",lu->id.ICNTL(10));
1096: PetscViewerASCIIPrintf(viewer," ICNTL(11) (error analysis): %d \n",lu->id.ICNTL(11));
1097: if (lu->id.ICNTL(11)>0) {
1098: PetscViewerASCIIPrintf(viewer," RINFOG(4) (inf norm of input mat): %g\n",lu->id.RINFOG(4));
1099: PetscViewerASCIIPrintf(viewer," RINFOG(5) (inf norm of solution): %g\n",lu->id.RINFOG(5));
1100: PetscViewerASCIIPrintf(viewer," RINFOG(6) (inf norm of residual): %g\n",lu->id.RINFOG(6));
1101: PetscViewerASCIIPrintf(viewer," RINFOG(7),RINFOG(8) (backward error est): %g, %g\n",lu->id.RINFOG(7),lu->id.RINFOG(8));
1102: PetscViewerASCIIPrintf(viewer," RINFOG(9) (error estimate): %g \n",lu->id.RINFOG(9));
1103: PetscViewerASCIIPrintf(viewer," RINFOG(10),RINFOG(11)(condition numbers): %g, %g\n",lu->id.RINFOG(10),lu->id.RINFOG(11));
1104: }
1105: PetscViewerASCIIPrintf(viewer," ICNTL(12) (efficiency control): %d \n",lu->id.ICNTL(12));
1106: PetscViewerASCIIPrintf(viewer," ICNTL(13) (efficiency control): %d \n",lu->id.ICNTL(13));
1107: PetscViewerASCIIPrintf(viewer," ICNTL(14) (percentage of estimated workspace increase): %d \n",lu->id.ICNTL(14));
1108: /* ICNTL(15-17) not used */
1109: PetscViewerASCIIPrintf(viewer," ICNTL(18) (input mat struct): %d \n",lu->id.ICNTL(18));
1110: PetscViewerASCIIPrintf(viewer," ICNTL(19) (Schur complement info): %d \n",lu->id.ICNTL(19));
1111: PetscViewerASCIIPrintf(viewer," ICNTL(20) (rhs sparse pattern): %d \n",lu->id.ICNTL(20));
1112: PetscViewerASCIIPrintf(viewer," ICNTL(21) (solution struct): %d \n",lu->id.ICNTL(21));
1113: PetscViewerASCIIPrintf(viewer," ICNTL(22) (in-core/out-of-core facility): %d \n",lu->id.ICNTL(22));
1114: PetscViewerASCIIPrintf(viewer," ICNTL(23) (max size of memory that can be allocated locally): %d \n",lu->id.ICNTL(23));
1115:
1116: PetscViewerASCIIPrintf(viewer," ICNTL(24) (detection of null pivot rows): %d \n",lu->id.ICNTL(24));
1117: PetscViewerASCIIPrintf(viewer," ICNTL(25) (computation of a null space basis): %d \n",lu->id.ICNTL(25));
1118: PetscViewerASCIIPrintf(viewer," ICNTL(26) (Schur options for rhs or solution): %d \n",lu->id.ICNTL(26));
1119: PetscViewerASCIIPrintf(viewer," ICNTL(27) (experimental parameter): %d \n",lu->id.ICNTL(27));
1120: PetscViewerASCIIPrintf(viewer," ICNTL(28) (use parallel or sequential ordering): %d \n",lu->id.ICNTL(28));
1121: PetscViewerASCIIPrintf(viewer," ICNTL(29) (parallel ordering): %d \n",lu->id.ICNTL(29));
1122:
1123: PetscViewerASCIIPrintf(viewer," ICNTL(30) (user-specified set of entries in inv(A)): %d \n",lu->id.ICNTL(30));
1124: PetscViewerASCIIPrintf(viewer," ICNTL(31) (factors are discarded in the solve phase): %d \n",lu->id.ICNTL(31));
1125: PetscViewerASCIIPrintf(viewer," ICNTL(33) (compute determinant): %d \n",lu->id.ICNTL(33));
1126:
1127: PetscViewerASCIIPrintf(viewer," CNTL(1) (relative pivoting threshold): %g \n",lu->id.CNTL(1));
1128: PetscViewerASCIIPrintf(viewer," CNTL(2) (stopping criterion of refinement): %g \n",lu->id.CNTL(2));
1129: PetscViewerASCIIPrintf(viewer," CNTL(3) (absolute pivoting threshold): %g \n",lu->id.CNTL(3));
1130: PetscViewerASCIIPrintf(viewer," CNTL(4) (value of static pivoting): %g \n",lu->id.CNTL(4));
1131: PetscViewerASCIIPrintf(viewer," CNTL(5) (fixation for null pivots): %g \n",lu->id.CNTL(5));
1132:
1133: /* information local to each processor */
1134: PetscViewerASCIIPrintf(viewer, " RINFO(1) (local estimated flops for the elimination after analysis): \n");
1135: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
1136: PetscViewerASCIISynchronizedPrintf(viewer," [%d] %g \n",lu->myid,lu->id.RINFO(1));
1137: PetscViewerFlush(viewer);
1138: PetscViewerASCIIPrintf(viewer, " RINFO(2) (local estimated flops for the assembly after factorization): \n");
1139: PetscViewerASCIISynchronizedPrintf(viewer," [%d] %g \n",lu->myid,lu->id.RINFO(2));
1140: PetscViewerFlush(viewer);
1141: PetscViewerASCIIPrintf(viewer, " RINFO(3) (local estimated flops for the elimination after factorization): \n");
1142: PetscViewerASCIISynchronizedPrintf(viewer," [%d] %g \n",lu->myid,lu->id.RINFO(3));
1143: PetscViewerFlush(viewer);
1144:
1145: PetscViewerASCIIPrintf(viewer, " INFO(15) (estimated size (in MB) of MUMPS internal data for running numerical factorization): \n");
1146: PetscViewerASCIISynchronizedPrintf(viewer," [%d] %d \n",lu->myid,lu->id.INFO(15));
1147: PetscViewerFlush(viewer);
1148:
1149: PetscViewerASCIIPrintf(viewer, " INFO(16) (size (in MB) of MUMPS internal data used during numerical factorization): \n");
1150: PetscViewerASCIISynchronizedPrintf(viewer," [%d] %d \n",lu->myid,lu->id.INFO(16));
1151: PetscViewerFlush(viewer);
1152:
1153: PetscViewerASCIIPrintf(viewer, " INFO(23) (num of pivots eliminated on this processor after factorization): \n");
1154: PetscViewerASCIISynchronizedPrintf(viewer," [%d] %d \n",lu->myid,lu->id.INFO(23));
1155: PetscViewerFlush(viewer);
1156: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
1158: if (!lu->myid){ /* information from the host */
1159: PetscViewerASCIIPrintf(viewer," RINFOG(1) (global estimated flops for the elimination after analysis): %g \n",lu->id.RINFOG(1));
1160: PetscViewerASCIIPrintf(viewer," RINFOG(2) (global estimated flops for the assembly after factorization): %g \n",lu->id.RINFOG(2));
1161: PetscViewerASCIIPrintf(viewer," RINFOG(3) (global estimated flops for the elimination after factorization): %g \n",lu->id.RINFOG(3));
1162: PetscViewerASCIIPrintf(viewer," (RINFOG(12) RINFOG(13))*2^INFOG(34) (determinant): (%g,%g)*(2^%d)\n",lu->id.RINFOG(12),lu->id.RINFOG(13),lu->id.INFOG(34));
1163:
1164: PetscViewerASCIIPrintf(viewer," INFOG(3) (estimated real workspace for factors on all processors after analysis): %d \n",lu->id.INFOG(3));
1165: PetscViewerASCIIPrintf(viewer," INFOG(4) (estimated integer workspace for factors on all processors after analysis): %d \n",lu->id.INFOG(4));
1166: PetscViewerASCIIPrintf(viewer," INFOG(5) (estimated maximum front size in the complete tree): %d \n",lu->id.INFOG(5));
1167: PetscViewerASCIIPrintf(viewer," INFOG(6) (number of nodes in the complete tree): %d \n",lu->id.INFOG(6));
1168: PetscViewerASCIIPrintf(viewer," INFOG(7) (ordering option effectively used after analysis): %d \n",lu->id.INFOG(7));
1169: PetscViewerASCIIPrintf(viewer," INFOG(8) (structural symmetry in percent of the permuted matrix after analysis): %d \n",lu->id.INFOG(8));
1170: PetscViewerASCIIPrintf(viewer," INFOG(9) (total real/complex workspace to store the matrix factors after factorization): %d \n",lu->id.INFOG(9));
1171: PetscViewerASCIIPrintf(viewer," INFOG(10) (total integer space to store the matrix factors after factorization): %d \n",lu->id.INFOG(10));
1172: PetscViewerASCIIPrintf(viewer," INFOG(11) (order of largest frontal matrix after factorization): %d \n",lu->id.INFOG(11));
1173: PetscViewerASCIIPrintf(viewer," INFOG(12) (number of off-diagonal pivots): %d \n",lu->id.INFOG(12));
1174: PetscViewerASCIIPrintf(viewer," INFOG(13) (number of delayed pivots after factorization): %d \n",lu->id.INFOG(13));
1175: PetscViewerASCIIPrintf(viewer," INFOG(14) (number of memory compresses after factorization): %d \n",lu->id.INFOG(14));
1176: PetscViewerASCIIPrintf(viewer," INFOG(15) (number of steps of iterative refinement after solution): %d \n",lu->id.INFOG(15));
1177: PetscViewerASCIIPrintf(viewer," INFOG(16) (estimated size (in MB) of all MUMPS internal data for factorization after analysis: value on the most memory consuming processor): %d \n",lu->id.INFOG(16));
1178: PetscViewerASCIIPrintf(viewer," INFOG(17) (estimated size of all MUMPS internal data for factorization after analysis: sum over all processors): %d \n",lu->id.INFOG(17));
1179: PetscViewerASCIIPrintf(viewer," INFOG(18) (size of all MUMPS internal data allocated during factorization: value on the most memory consuming processor): %d \n",lu->id.INFOG(18));
1180: PetscViewerASCIIPrintf(viewer," INFOG(19) (size of all MUMPS internal data allocated during factorization: sum over all processors): %d \n",lu->id.INFOG(19));
1181: PetscViewerASCIIPrintf(viewer," INFOG(20) (estimated number of entries in the factors): %d \n",lu->id.INFOG(20));
1182: PetscViewerASCIIPrintf(viewer," INFOG(21) (size in MB of memory effectively used during factorization - value on the most memory consuming processor): %d \n",lu->id.INFOG(21));
1183: PetscViewerASCIIPrintf(viewer," INFOG(22) (size in MB of memory effectively used during factorization - sum over all processors): %d \n",lu->id.INFOG(22));
1184: PetscViewerASCIIPrintf(viewer," INFOG(23) (after analysis: value of ICNTL(6) effectively used): %d \n",lu->id.INFOG(23));
1185: PetscViewerASCIIPrintf(viewer," INFOG(24) (after analysis: value of ICNTL(12) effectively used): %d \n",lu->id.INFOG(24));
1186: PetscViewerASCIIPrintf(viewer," INFOG(25) (after factorization: number of pivots modified by static pivoting): %d \n",lu->id.INFOG(25));
1187: }
1188: }
1189: }
1190: return(0);
1191: }
1195: PetscErrorCode MatGetInfo_MUMPS(Mat A,MatInfoType flag,MatInfo *info)
1196: {
1197: Mat_MUMPS *mumps =(Mat_MUMPS*)A->spptr;
1200: info->block_size = 1.0;
1201: info->nz_allocated = mumps->id.INFOG(20);
1202: info->nz_used = mumps->id.INFOG(20);
1203: info->nz_unneeded = 0.0;
1204: info->assemblies = 0.0;
1205: info->mallocs = 0.0;
1206: info->memory = 0.0;
1207: info->fill_ratio_given = 0;
1208: info->fill_ratio_needed = 0;
1209: info->factor_mallocs = 0;
1210: return(0);
1211: }
1213: /* -------------------------------------------------------------------------------------------*/
1216: PetscErrorCode MatMumpsSetIcntl_MUMPS(Mat F,PetscInt icntl,PetscInt ival)
1217: {
1218: Mat_MUMPS *lu =(Mat_MUMPS*)F->spptr;
1221: lu->id.ICNTL(icntl) = ival;
1222: return(0);
1223: }
1227: /*@
1228: MatMumpsSetIcntl - Set MUMPS parameter ICNTL()
1230: Logically Collective on Mat
1232: Input Parameters:
1233: + F - the factored matrix obtained by calling MatGetFactor() from PETSc-MUMPS interface
1234: . icntl - index of MUMPS parameter array ICNTL()
1235: - ival - value of MUMPS ICNTL(icntl)
1237: Options Database:
1238: . -mat_mumps_icntl_<icntl> <ival>
1240: Level: beginner
1242: References: MUMPS Users' Guide
1244: .seealso: MatGetFactor()
1245: @*/
1246: PetscErrorCode MatMumpsSetIcntl(Mat F,PetscInt icntl,PetscInt ival)
1247: {
1253: PetscTryMethod(F,"MatMumpsSetIcntl_C",(Mat,PetscInt,PetscInt),(F,icntl,ival));
1254: return(0);
1255: }
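/*
  Example (error checking omitted): raise ICNTL(14), the workspace increase
  percentage, on a factor obtained from this interface:

    Mat F;
    MatGetFactor(A,MATSOLVERMUMPS,MAT_FACTOR_LU,&F);
    MatMumpsSetIcntl(F,14,30);   same effect as -mat_mumps_icntl_14 30
*/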
1257: /*MC
1258: MATSOLVERMUMPS - A matrix type providing direct solvers (LU and Cholesky) for
1259: distributed and sequential matrices via the external package MUMPS.
1261: Works with MATAIJ and MATSBAIJ matrices; MATBAIJ is supported for LU only
1263: Options Database Keys:
1264: + -mat_mumps_icntl_4 <0,...,4> - print level
1265: . -mat_mumps_icntl_6 <0,...,7> - matrix prescaling options (see MUMPS User's Guide)
1266: . -mat_mumps_icntl_7 <0,...,7> - matrix orderings (see MUMPS User's Guide)
1267: . -mat_mumps_icntl_9 <1,2> - A or A^T x=b to be solved: 1 denotes A, 2 denotes A^T
1268: . -mat_mumps_icntl_10 <n> - maximum number of iterative refinements
1269: . -mat_mumps_icntl_11 <n> - error analysis, a positive value returns statistics during -ksp_view
1270: . -mat_mumps_icntl_12 <n> - efficiency control (see MUMPS User's Guide)
1271: . -mat_mumps_icntl_13 <n> - efficiency control (see MUMPS User's Guide)
1272: . -mat_mumps_icntl_14 <n> - efficiency control (see MUMPS User's Guide)
1273: . -mat_mumps_icntl_15 <n> - efficiency control (see MUMPS User's Guide)
1274: . -mat_mumps_cntl_1 <delta> - relative pivoting threshold
1275: . -mat_mumps_cntl_2 <tol> - stopping criterion for refinement
1276: - -mat_mumps_cntl_3 <adelta> - absolute pivoting threshold
1278: Level: beginner
1280: .seealso: PCFactorSetMatSolverPackage(), MatSolverPackage
1282: M*/
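/*
  Typical use is through KSP/PC rather than calling this interface directly; a
  minimal sketch (comm, A, b, x are assumed set up; error checking omitted),
  equivalent to -pc_type lu -pc_factor_mat_solver_package mumps:

    KSP ksp;
    PC  pc;
    KSPCreate(comm,&ksp);
    KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
    KSPGetPC(ksp,&pc);
    PCSetType(pc,PCLU);
    PCFactorSetMatSolverPackage(pc,MATSOLVERMUMPS);
    KSPSetFromOptions(ksp);
    KSPSolve(ksp,b,x);
*/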
1284: EXTERN_C_BEGIN
1287: PetscErrorCode MatFactorGetSolverPackage_mumps(Mat A,const MatSolverPackage *type)
1288: {
1290: *type = MATSOLVERMUMPS;
1291: return(0);
1292: }
1293: EXTERN_C_END
1295: EXTERN_C_BEGIN
1296: /* MatGetFactor for Seq and MPI AIJ matrices */
1299: PetscErrorCode MatGetFactor_aij_mumps(Mat A,MatFactorType ftype,Mat *F)
1300: {
1301: Mat B;
1303: Mat_MUMPS *mumps;
1304: PetscBool isSeqAIJ;
1307: /* Create the factorization matrix */
1308: PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeqAIJ);
1309: MatCreate(((PetscObject)A)->comm,&B);
1310: MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1311: MatSetType(B,((PetscObject)A)->type_name);
1312: if (isSeqAIJ) {
1313: MatSeqAIJSetPreallocation(B,0,PETSC_NULL);
1314: } else {
1315: MatMPIAIJSetPreallocation(B,0,PETSC_NULL,0,PETSC_NULL);
1316: }
1318: PetscNewLog(B,Mat_MUMPS,&mumps);
1319: B->ops->view = MatView_MUMPS;
1320: B->ops->getinfo = MatGetInfo_MUMPS;
1321: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatFactorGetSolverPackage_C","MatFactorGetSolverPackage_mumps",MatFactorGetSolverPackage_mumps);
1322: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMumpsSetIcntl_C","MatMumpsSetIcntl_MUMPS",MatMumpsSetIcntl_MUMPS);
1323: if (ftype == MAT_FACTOR_LU) {
1324: B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMUMPS;
1325: B->factortype = MAT_FACTOR_LU;
1326: if (isSeqAIJ) mumps->ConvertToTriples = MatConvertToTriples_seqaij_seqaij;
1327: else mumps->ConvertToTriples = MatConvertToTriples_mpiaij_mpiaij;
1328: mumps->sym = 0;
1329: } else {
1330: B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MUMPS;
1331: B->factortype = MAT_FACTOR_CHOLESKY;
1332: if (isSeqAIJ) mumps->ConvertToTriples = MatConvertToTriples_seqaij_seqsbaij;
1333: else mumps->ConvertToTriples = MatConvertToTriples_mpiaij_mpisbaij;
1334: if (A->spd_set && A->spd) mumps->sym = 1;
1335: else mumps->sym = 2;
1336: }
1338: mumps->isAIJ = PETSC_TRUE;
1339: mumps->Destroy = B->ops->destroy;
1340: B->ops->destroy = MatDestroy_MUMPS;
1341: B->spptr = (void*)mumps;
1342: PetscInitializeMUMPS(A,mumps);
1344: *F = B;
1345: return(0);
1346: }
1347: EXTERN_C_END
1350: EXTERN_C_BEGIN
1351: /* MatGetFactor for Seq and MPI SBAIJ matrices */
1354: PetscErrorCode MatGetFactor_sbaij_mumps(Mat A,MatFactorType ftype,Mat *F)
1355: {
1356: Mat B;
1358: Mat_MUMPS *mumps;
1359: PetscBool isSeqSBAIJ;
1362: if (ftype != MAT_FACTOR_CHOLESKY) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"Cannot use PETSc SBAIJ matrices with MUMPS LU, use AIJ matrix");
1363: if(A->rmap->bs > 1) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"Cannot use PETSc SBAIJ matrices with block size > 1 with MUMPS Cholesky, use AIJ matrix instead");
1364: PetscTypeCompare((PetscObject)A,MATSEQSBAIJ,&isSeqSBAIJ);
1365: /* Create the factorization matrix */
1366: MatCreate(((PetscObject)A)->comm,&B);
1367: MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1368: MatSetType(B,((PetscObject)A)->type_name);
1369: PetscNewLog(B,Mat_MUMPS,&mumps);
1370: if (isSeqSBAIJ) {
1371: MatSeqSBAIJSetPreallocation(B,1,0,PETSC_NULL);
1372: mumps->ConvertToTriples = MatConvertToTriples_seqsbaij_seqsbaij;
1373: } else {
1374: MatMPISBAIJSetPreallocation(B,1,0,PETSC_NULL,0,PETSC_NULL);
1375: mumps->ConvertToTriples = MatConvertToTriples_mpisbaij_mpisbaij;
1376: }
1378: B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MUMPS;
1379: B->ops->view = MatView_MUMPS;
1380: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatFactorGetSolverPackage_C","MatFactorGetSolverPackage_mumps",MatFactorGetSolverPackage_mumps);
1381: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMumpsSetIcntl_C","MatMumpsSetIcntl_MUMPS",MatMumpsSetIcntl_MUMPS);
1382: B->factortype = MAT_FACTOR_CHOLESKY;
1383: if (A->spd_set && A->spd) mumps->sym = 1;
1384: else mumps->sym = 2;
1386: mumps->isAIJ = PETSC_FALSE;
1387: mumps->Destroy = B->ops->destroy;
1388: B->ops->destroy = MatDestroy_MUMPS;
1389: B->spptr = (void*)mumps;
1390: PetscInitializeMUMPS(A,mumps);
1392: *F = B;
1393: return(0);
1394: }
1395: EXTERN_C_END
1397: EXTERN_C_BEGIN
1400: PetscErrorCode MatGetFactor_baij_mumps(Mat A,MatFactorType ftype,Mat *F)
1401: {
1402: Mat B;
1404: Mat_MUMPS *mumps;
1405: PetscBool isSeqBAIJ;
1408: /* Create the factorization matrix */
1409: PetscTypeCompare((PetscObject)A,MATSEQBAIJ,&isSeqBAIJ);
1410: MatCreate(((PetscObject)A)->comm,&B);
1411: MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1412: MatSetType(B,((PetscObject)A)->type_name);
1413: if (isSeqBAIJ) {
1414: MatSeqBAIJSetPreallocation(B,A->rmap->bs,0,PETSC_NULL);
1415: } else {
1416: MatMPIBAIJSetPreallocation(B,A->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);
1417: }
1419: PetscNewLog(B,Mat_MUMPS,&mumps);
1420: if (ftype == MAT_FACTOR_LU) {
1421: B->ops->lufactorsymbolic = MatLUFactorSymbolic_BAIJMUMPS;
1422: B->factortype = MAT_FACTOR_LU;
1423: if (isSeqBAIJ) mumps->ConvertToTriples = MatConvertToTriples_seqbaij_seqaij;
1424: else mumps->ConvertToTriples = MatConvertToTriples_mpibaij_mpiaij;
1425: mumps->sym = 0;
1426: } else {
1427: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot use PETSc BAIJ matrices with MUMPS Cholesky, use SBAIJ or AIJ matrix instead\n");
1428: }
1430: B->ops->view = MatView_MUMPS;
1431: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatFactorGetSolverPackage_C","MatFactorGetSolverPackage_mumps",MatFactorGetSolverPackage_mumps);
1432: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMumpsSetIcntl_C","MatMumpsSetIcntl_MUMPS",MatMumpsSetIcntl_MUMPS);
1434: mumps->isAIJ = PETSC_TRUE;
1435: mumps->Destroy = B->ops->destroy;
1436: B->ops->destroy = MatDestroy_MUMPS;
1437: B->spptr = (void*)mumps;
1438: PetscInitializeMUMPS(A,mumps);
1440: *F = B;
1441: return(0);
1442: }
1443: EXTERN_C_END