Actual source code: ex106.c

petsc-3.7.3 2016-08-01
static char help[] = "Test repeated LU factorizations. Used for checking memory leak\n\
  -m <size> : problem size\n\
  -mat_nonsym : use nonsymmetric matrix (default is symmetric)\n\n";
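/*
   Illustrative usage (a sketch; the launcher and executable name are
   conventions, not taken from this file):
       mpiexec -n 2 ./ex106 -m 5 -mat_nonsym
*/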

#include <petscmat.h>

int main(int argc,char **args)
{
  Mat            C,F;               /* system matrix and its LU factor */
  Vec            x,u,b;             /* approx solution, exact solution, RHS */
  PetscReal      norm;              /* norm of solution error */
  PetscScalar    v,none = -1.0;
  PetscInt       I,J,ldim,low,high,iglobal,Istart,Iend;
  PetscInt       i,j,m = 3,n = 2;
  PetscMPIInt    size,rank;
  PetscBool      mat_nonsymmetric;
  PetscInt       its_max;
  MatFactorInfo  factinfo;
  IS             perm,iperm;

  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n    = 2*size;

  /*
     Set flag if we are doing a nonsymmetric problem; the default is symmetric.
  */
  PetscOptionsHasName(NULL,NULL,"-mat_nonsym",&mat_nonsymmetric);

  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
  MatCreate(PETSC_COMM_WORLD,&C);
  MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C);
  MatGetOwnershipRange(C,&Istart,&Iend);
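  /*
     Optional sketch (not part of the original example): each row of the
     stencil built below has at most 6 nonzeros (5-point stencil plus the
     optional nonsymmetric entry), so the AIJ formats can be preallocated
     up front to avoid mallocs during assembly.  Both calls are no-ops
     when C is not of the corresponding type.
  */
  MatSeqAIJSetPreallocation(C,6,NULL);
  MatMPIAIJSetPreallocation(C,6,NULL,6,NULL);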

  /*
     Set matrix entries in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  for (I=Istart; I<Iend; I++) {
    v = -1.0; i = I/n; j = I - i*n;
    if (i>0)   {J = I - n; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = I + n; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = I - 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = I + 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(C,1,&I,1,&I,&v,ADD_VALUES);
  }

  /*
     Make the matrix nonsymmetric if desired.
  */
  if (mat_nonsymmetric) {
    for (I=Istart; I<Iend; I++) {
      v = -1.5; i = I/n;
      if (i>1) {J = I - n - 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    }
  } else {
    MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE);
    MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
  }

  /*
     Assemble the matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
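  /*
     Optional sketch (not in the original example): the assembled matrix
     can be inspected for debugging, either programmatically as below or
     on demand at runtime with the -mat_view option.
  */
  MatView(C,PETSC_VIEWER_STDOUT_WORLD);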

  /*
     Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);

  /*
     Currently, all parallel PETSc vectors are partitioned by
     contiguous chunks across the processors.  Determine which
     range of entries is locally owned.
  */
  VecGetOwnershipRange(x,&low,&high);

  /*
     Set elements within the exact solution vector in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local entries will be sent to the
        appropriate processor during vector assembly).
      - Always specify global locations of vector entries.
  */
  VecGetLocalSize(x,&ldim);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,INSERT_VALUES);
  }

  /*
     Assemble the vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /* Compute the right-hand-side vector from the exact solution */
  MatMult(C,u,b);

  MatGetOrdering(C,MATORDERINGNATURAL,&perm,&iperm);
  MatFactorInfoInitialize(&factinfo);

  /*
     Repeatedly create, factor, and destroy F; memory leaked anywhere in
     the factorization path accumulates over the iterations and becomes
     visible.
  */
  its_max = 2000;
  for (i=0; i<its_max; i++) {
    MatGetFactor(C,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
    MatLUFactorSymbolic(F,C,perm,iperm,&factinfo);
    for (j=0; j<1; j++) {  /* raise the bound to repeat the numeric factorization */
      MatLUFactorNumeric(F,C,&factinfo);
    }
    MatSolve(F,b,x);
    MatDestroy(&F);
  }
  ISDestroy(&perm);
  ISDestroy(&iperm);
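  /*
     Optional sketch (not in the original example): since this test targets
     memory leaks, the process's current memory use can be reported after
     the loop; growth proportional to its_max would indicate a leak.
  */
  {
    PetscLogDouble mem;
    PetscMemoryGetCurrentUsage(&mem);
    PetscPrintf(PETSC_COMM_WORLD,"Memory after %D factorizations: %g bytes\n",its_max,mem);
  }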

  /* Check the error */
  VecAXPY(x,none,u);
  VecNorm(x,NORM_2,&norm);
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g\n",(double)norm);

  /* Free work space. */
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&C);
  PetscFinalize();
  return 0;
}