Actual source code: ex18.c
petsc-3.6.4 2016-04-12
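
The example assembles a distributed operator on an m-by-n grid of nodes with bs unknowns per node, marks the first grid row and first grid column as boundary nodes, and verifies that MatZeroRowsColumns() eliminates those rows and columns consistently in parallel. Two options are read from the command line: -bs <n> sets the number of unknowns per node, and -nonlocal_bc switches to the variant where boundary conditions are supplied by processes that do not necessarily own the nodes. A typical run (assuming a standard PETSc build of the example) would be: mpiexec -n 2 ./ex18 -bs 2 -nonlocal_bc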
static char help[] = "Tests the use of MatZeroRowsColumns() for parallel matrices.\n\
Contributed-by: Stephan Kramer <s.kramer@imperial.ac.uk>\n\n";

#include <petscmat.h>
int main(int argc,char **args)
{
  Mat            A;
  Vec            x, rhs, y;
  PetscInt       i,j,k,b,m = 3,n,nlocal=2,bs=1,Ii,J;
  PetscInt       *boundary_nodes, nboundary_nodes, *boundary_indices;
  PetscMPIInt    rank,size;
  PetscScalar    v,v0,v1,v2,a0=0.1,a,rhsval, *boundary_values;
  PetscReal      norm;
  PetscBool      upwind = PETSC_FALSE, nonlocalBC = PETSC_FALSE;

  PetscInitialize(&argc,&args,(char*)0,help);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n = nlocal*size;

  PetscOptionsGetInt(NULL, "-bs", &bs, NULL);
  PetscOptionsGetBool(NULL, "-nonlocal_bc", &nonlocalBC, NULL);

  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n*bs,m*n*bs);
  MatSetFromOptions(A);
  MatSetUp(A);

  VecCreate(PETSC_COMM_WORLD, &rhs);
  VecSetSizes(rhs, PETSC_DECIDE, m*n*bs);
  VecSetFromOptions(rhs);
  VecSetUp(rhs);
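
  /* Assemble a 5-point-stencil diffusion operator over an m x n grid of nodes with
     bs unknowns per node; component b at node (i,j) has global index (j + n*i)*bs + b.
     Each process fills the rows for its own nlocal values of j, for every i. */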
  rhsval = 0.0;
  for (i=0; i<m; i++) {
    for (j=nlocal*rank; j<nlocal*(rank+1); j++) {
      a = a0;
      for (b=0; b<bs; b++) {
        /* let's start with a 5-point stencil diffusion term */
        v = -1.0; Ii = (j + n*i)*bs + b;
        if (i>0)   {J = Ii - n*bs; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (i<m-1) {J = Ii + n*bs; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j>0)   {J = Ii - 1*bs; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j<n-1) {J = Ii + 1*bs; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
        v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,ADD_VALUES);
        if (upwind) {
          /* now add a 2nd order upwind advection term to add a little asymmetry */
          if (j>2) {
            J = Ii-2*bs; v2 = 0.5*a; v1 = -2.0*a; v0 = 1.5*a;
            MatSetValues(A,1,&Ii,1,&J,&v2,ADD_VALUES);
          } else {
            /* fall back to 1st order upwind */
            v1 = -1.0*a; v0 = 1.0*a;
          }
          if (j>1) {J = Ii-1*bs; MatSetValues(A,1,&Ii,1,&J,&v1,ADD_VALUES);}
          MatSetValues(A,1,&Ii,1,&Ii,&v0,ADD_VALUES);
          a /= 10.; /* use a different velocity for the next component */
          /* add a coupling to the previous and next components */
          v = 0.5;
          if (b>0)    {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
          if (b<bs-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
        }
        /* make up some rhs */
        VecSetValue(rhs, Ii, rhsval, INSERT_VALUES);
        rhsval += 1.0;
      }
    }
  }
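
  /* assembly ships any entries that were added to rows owned by other processes */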
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  VecAssemblyBegin(rhs);
  VecAssemblyEnd(rhs);
  /* set rhs to zero to simplify */
  VecZeroEntries(rhs);
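
  /* The boundary consists of the first grid row (i=0, all j) and the rest of the
     first grid column (j=0, i>0).  Two ways of listing these nodes are exercised:
     with -nonlocal_bc a process may list boundary nodes it does not own. */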
  if (nonlocalBC) {
    /* version where boundary conditions are set by processes that don't necessarily own the nodes */
    if (!rank) {
      nboundary_nodes = size>m ? nlocal : m-size+nlocal;
      PetscMalloc1(nboundary_nodes,&boundary_nodes);
      k = 0;
      for (i=size; i<m; i++,k++) boundary_nodes[k] = n*i;
    } else if (rank < m) {
      nboundary_nodes = nlocal+1;
      PetscMalloc1(nboundary_nodes,&boundary_nodes);
      boundary_nodes[0] = rank*n;
      k = 1;
    } else {
      nboundary_nodes = nlocal;
      PetscMalloc1(nboundary_nodes,&boundary_nodes);
      k = 0;
    }
    for (j=nlocal*rank; j<nlocal*(rank+1); j++,k++) boundary_nodes[k] = j;
  } else {
    /* version where boundary conditions are set by the node owners only */
    PetscMalloc1(m*n,&boundary_nodes);
    k = 0;
    for (j=0; j<n; j++) {
      Ii = j;
      if (Ii>=rank*m*nlocal && Ii<(rank+1)*m*nlocal) boundary_nodes[k++] = Ii;
    }
    for (i=1; i<m; i++) {
      Ii = n*i;
      if (Ii>=rank*m*nlocal && Ii<(rank+1)*m*nlocal) boundary_nodes[k++] = Ii;
    }
    nboundary_nodes = k;
  }
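
  /* Give every boundary component a recognizable value: component b of boundary
     node Ii gets row index Ii*bs+b and value Ii + 0.1*b.  The values are inserted
     into x, which stays zero everywhere else, and echoed rank by rank. */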
  VecDuplicate(rhs, &x);
  VecZeroEntries(x);
  PetscMalloc2(nboundary_nodes*bs,&boundary_indices,nboundary_nodes*bs,&boundary_values);
  for (k=0; k<nboundary_nodes; k++) {
    Ii = boundary_nodes[k]*bs;
    v  = 1.0*boundary_nodes[k];
    for (b=0; b<bs; b++, Ii++) {
      boundary_indices[k*bs+b] = Ii;
      boundary_values[k*bs+b]  = v;
      PetscSynchronizedPrintf(PETSC_COMM_WORLD, "%d %D %f\n", rank, Ii, (double)PetscRealPart(v));
      v += 0.1;
    }
  }
  PetscSynchronizedFlush(PETSC_COMM_WORLD, NULL);
  if (nboundary_nodes) {VecSetValues(x, nboundary_nodes*bs, boundary_indices, boundary_values, INSERT_VALUES);}
  VecAssemblyBegin(x);
  VecAssemblyEnd(x);
  /* We can check the rhs returned by MatZeroRowsColumns() by computing y = rhs - A*x and
     overwriting the boundary entries with the boundary values: x is zero except at the
     boundary entries, so y holds exactly the right-hand side the elimination should
     produce for the interior rows. */
  VecDuplicate(x, &y);
  MatMult(A, x, y);
  VecAYPX(y, -1.0, rhs);
  if (nboundary_nodes) {VecSetValues(y, nboundary_nodes*bs, boundary_indices, boundary_values, INSERT_VALUES);}
  VecAssemblyBegin(y);
  VecAssemblyEnd(y);
  PetscPrintf(PETSC_COMM_WORLD, "*** Matrix A and vector x:\n");
  MatView(A, PETSC_VIEWER_STDOUT_WORLD);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
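
  /* MatZeroRowsColumns() zeros the listed rows and columns of A, puts the given
     diagonal value (here 1.0) on their diagonal, sets rhs at the eliminated rows
     to diag*x, and subtracts the eliminated column contributions A(:,bc)*x(bc)
     from the remaining rhs entries.  It is collective: every process passes its
     own list, which with -nonlocal_bc may include rows it does not own. */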
  MatZeroRowsColumns(A, nboundary_nodes*bs, boundary_indices, 1.0, x, rhs);
  PetscPrintf(PETSC_COMM_WORLD, "*** Vector rhs returned by MatZeroRowsColumns\n");
  VecView(rhs,PETSC_VIEWER_STDOUT_WORLD);

  VecAXPY(y, -1.0, rhs);
  VecNorm(y, NORM_INFINITY, &norm);
  if (norm > 1.0e-10) {
    PetscPrintf(PETSC_COMM_WORLD, "*** Difference between rhs and y, inf-norm: %f\n", (double)norm);
    VecView(y,PETSC_VIEWER_STDOUT_WORLD);
    SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Bug in MatZeroRowsColumns");
  }
  PetscFree(boundary_nodes);
  PetscFree2(boundary_indices,boundary_values);
  VecDestroy(&x);
  VecDestroy(&y);
  VecDestroy(&rhs);
  MatDestroy(&A);
  PetscFinalize();
  return 0;
}
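
In application code the pattern tested above usually feeds a linear solve: after assembling the operator and right-hand side, the Dirichlet values are placed in a vector and eliminated with MatZeroRowsColumns() before handing the system to KSP. A minimal sketch of that follow-up, assuming A and rhs are assembled as in the example and using hypothetical names nbc, bc_rows, and xbc for the boundary data:

  /* requires #include <petscksp.h> */
  KSP ksp;
  Vec sol;

  VecDuplicate(rhs, &sol);
  /* eliminate the boundary unknowns; zeroing the columns as well as the rows
     preserves symmetry of A, unlike MatZeroRows() alone */
  MatZeroRowsColumns(A, nbc, bc_rows, 1.0, xbc, rhs);

  KSPCreate(PETSC_COMM_WORLD, &ksp);
  KSPSetOperators(ksp, A, A);
  KSPSetFromOptions(ksp);
  KSPSolve(ksp, rhs, sol);     /* sol carries the prescribed values at bc_rows */

  KSPDestroy(&ksp);
  VecDestroy(&sol);

Keeping the eliminated system symmetric is what makes MatZeroRowsColumns() preferable to MatZeroRows() when a symmetric Krylov method such as CG is used.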