Actual source code: ex106.c
petsc-3.8.4 2018-03-24
static char help[] = "Test repeated LU factorizations. Used for checking memory leak\n\
  -m <size> : problem size\n\
  -mat_nonsym : use nonsymmetric matrix (default is symmetric)\n\n";
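/*
   Typical usage (the executable name ex106 and the MPI launcher are assumptions;
   adjust to your build and environment):
       mpiexec -n 2 ./ex106 -m 5
       mpiexec -n 2 ./ex106 -m 5 -mat_nonsym
*/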
#include <petscmat.h>

int main(int argc,char **args)
{
  Mat            C,F;                /* matrix and its LU factor */
  Vec            x,u,b;              /* approx solution, RHS, exact solution */
  PetscReal      norm;               /* norm of solution error */
  PetscScalar    v,none = -1.0;
  PetscInt       I,J,ldim,low,high,iglobal,Istart,Iend;
  PetscErrorCode ierr;               /* needed for the error checks below */
  PetscInt       i,j,m = 3,n = 2,its;
  PetscMPIInt    size,rank;
  PetscBool      mat_nonsymmetric;
  PetscInt       its_max;
  MatFactorInfo  factinfo;
  IS             perm,iperm;
  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n = 2*size;

  /*
     Set a flag if we are doing a nonsymmetric problem; the default is symmetric.
  */
  PetscOptionsHasName(NULL,NULL,"-mat_nonsym",&mat_nonsymmetric);

  /*
     Create a parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
  MatCreate(PETSC_COMM_WORLD,&C);
  MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C);
  MatGetOwnershipRange(C,&Istart,&Iend);
  /*
     Set matrix entries in parallel.
      - Each processor needs to insert only the elements that it owns
        locally (any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global row and column indices of matrix entries.
  */
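  /* The loop below assembles the standard 5-point stencil on an m x n grid:
     4 on the diagonal and -1 for each north/south/east/west neighbour. */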
  for (I=Istart; I<Iend; I++) {
    v = -1.0; i = I/n; j = I - i*n;
    if (i>0)   {J = I - n; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = I + n; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = I - 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = I + 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(C,1,&I,1,&I,&v,ADD_VALUES);
  }
  /*
     Make the matrix nonsymmetric if desired.
  */
  if (mat_nonsymmetric) {
    for (I=Istart; I<Iend; I++) {
      v = -1.5; i = I/n;
      if (i>1) {J = I-n-1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    }
  } else {
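    /*
       These hints let PETSc exploit symmetry: MAT_SYMMETRIC marks the matrix as
       symmetric, and MAT_SYMMETRY_ETERNAL promises that symmetry is preserved by
       later changes to the matrix, so the flag is not discarded on reassembly.
    */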
    MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE);
    MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
  }
  /*
     Assemble the matrix, using the two-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
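  /* Default number of factorization iterations; this value is overridden below,
     just before the factorization loop. */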
  its_max = 1000;

  /*
     Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
  /*
     Currently, all parallel PETSc vectors are partitioned by
     contiguous chunks across the processors.  Determine which
     range of entries is locally owned.
  */
  VecGetOwnershipRange(x,&low,&high);

  /*
     Set elements within the exact solution vector in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local entries will be sent to the
        appropriate processor during vector assembly).
      - Always specify global locations of vector entries.
  */
  VecGetLocalSize(x,&ldim);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,INSERT_VALUES);
  }
  /*
     Assemble the vector, using the two-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /* Compute the right-hand-side vector */
  MatMult(C,u,b);
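  /*
     Core of the test: repeatedly create an LU factor of C, perform the symbolic
     and numeric factorizations, solve with it, and destroy it.  Because F is
     destroyed at the end of every iteration, memory use should stay flat across
     iterations; steady growth indicates the leak this example is meant to detect.
  */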
  MatFactorInfoInitialize(&factinfo); /* zero the factorization options so they are not used uninitialized */
  factinfo.fill = 1.0;                /* expected fill ratio for the LU factors */

  MatGetOrdering(C,MATORDERINGNATURAL,&perm,&iperm);
  its_max = 2000;
  for (i=0; i<its_max; i++) {
    MatGetFactor(C,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
    MatLUFactorSymbolic(F,C,perm,iperm,&factinfo);
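    /* The inner loop runs only once here; its bound can be raised to repeat the
       numeric factorization while reusing the same symbolic factorization. */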
    for (j=0; j<1; j++) {
      MatLUFactorNumeric(F,C,&factinfo);
    }
    MatSolve(F,b,x);
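    /* Release the factor created in this iteration so the next iteration starts from scratch */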
    MatDestroy(&F);
  }
  ISDestroy(&perm);
  ISDestroy(&iperm);
  /* Check the error: x was computed from b = C*u, so x - u should be at the level of machine precision */
  VecAXPY(x,none,u);
  VecNorm(x,NORM_2,&norm);
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g\n",(double)norm);
  /* Free work space. */
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&C);
  ierr = PetscFinalize();
  return ierr;
}