Actual source code: ex4.c
petsc-3.8.4 2018-03-24
static char help[] = "Test MatSetValuesBatch: setting batches of elements using the GPU.\n\
This works with SeqAIJCUSP and MPIAIJCUSP matrices.\n\n";

#include <petscdm.h>
#include <petscdmda.h>
#include <petscksp.h>
/* We will use a structured mesh for this assembly test. Each square will be
   divided into two triangles:

     C       D
      _______
     |\      | The matrix for elements 0 and 1 is   /  1.0  -0.5  -0.5 \
     | \   1 |                                      | -0.5   0.5   0.0 |
     |  \    |                                      \ -0.5   0.0   0.5 /
     |   \   |
     |    \  |
     | 0   \ |
     |      \|
      -------
     A       B
   TO ADD:
   DONE 1) Build and run on baconost
      - Gather data for CPU/GPU up to -da_grid_x 1300
      - Appears about 6x faster than the CPU
      - Make a plot

   DONE 2) Solve the Neumann Poisson problem

   3) Multi-GPU assembly
      - MPIAIJCUSP: just two SeqAIJCUSP matrices, nothing else special
      a) Filter the rows to be sent to other procs (normally these are stashed)
      b) Send/receive the rows, might as well do it with a VecScatter
      c) Potential to overlap this communication with GPU computation (talk to Nathan)
      c') Or just shove these rows in after the local ones
      d) Keep an implicit representation of the COO matrix from repeated/tiled_range
      e) Do a filtered copy, decrementing rows and remapping columns, which splits the entries into two sets
      f) Make two COO matrices and do separate aggregation on each one

   4) Solve the Neumann Poisson problem in parallel
      - Try it on the GPU machine at Brown (they need another GNU install)

   5) GPU FEM integration
      - Move the launch code to PETSc, or try again now that assembly is in PETSc
      - Move the build code to PETSc

   6) Try out CUSP preconditioners
*/
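/* Example usage, a sketch assuming a CUSP-enabled PETSc 3.8 build (the executable
   name is illustrative; -view is defined in this example, and -da_grid_x/-da_grid_y
   are the standard DMDA grid-size options):

     ./ex4 -da_grid_x 100 -da_grid_y 100 -ksp_monitor
*/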
PetscErrorCode IntegrateCells(DM dm, PetscInt *Ne, PetscInt *Nl, PetscInt **elemRows, PetscScalar **elemMats)
{
  DMDALocalInfo  info;
  PetscInt      *er;
  PetscScalar   *em;
  PetscInt       X, Y, dof;
  PetscInt       nl, nxe, nye, ne;
  PetscInt       k = 0, m = 0;
  PetscInt       i, j;
  PetscErrorCode ierr;
#if defined(PETSC_USE_LOG)
  PetscLogEvent  integrationEvent;
#endif
  PetscFunctionBeginUser;
  ierr = PetscLogEventRegister("ElemIntegration", DM_CLASSID, &integrationEvent);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(integrationEvent,0,0,0,0);CHKERRQ(ierr);
  ierr = DMDAGetInfo(dm, 0, &X, &Y,0,0,0,0, &dof,0,0,0,0,0);CHKERRQ(ierr);
  ierr = DMDAGetLocalInfo(dm, &info);CHKERRQ(ierr);
  nl   = dof*3;
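  /* Each process integrates the squares whose lower-left vertex it owns. The last
     vertex column/row of the global grid starts no square, hence the decrements
     below; every square contributes two triangular elements. */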
  nxe  = info.xm; if (info.xs+info.xm == X) nxe--;
  nye  = info.ym; if (info.ys+info.ym == Y) nye--;
  ne   = 2 * nxe * nye;
  *Ne  = ne;
  *Nl  = nl;
  ierr = PetscMalloc2(ne*nl, elemRows, ne*nl*nl, elemMats);CHKERRQ(ierr);
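  /* elemRows holds nl global row indices per element; elemMats holds the matching
     nl x nl dense element matrix in row-major order, one element after another. */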
  er   = *elemRows;
  em   = *elemMats;
  /* Proc 0        Proc 1        */
  /* xs: 0 xm: 3   xs: 0 xm: 3   */
  /* ys: 0 ym: 2   ys: 2 ym: 1   */
  /* 8 elements x 3 vertices = 24 element matrix rows and 72 entries */
  /*  6 offproc rows containing 18 element matrix entries */
  /* 18 onproc  rows containing 54 element matrix entries */
  /*  3 offproc columns in 8 element matrix entries */
  /* so we should have 46 diagonal matrix entries */
  for (j = info.ys; j < info.ys+nye; ++j) {
    for (i = info.xs; i < info.xs+nxe; ++i) {
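      /* Global indices of the square's corner vertices in the lexicographic DMDA
         numbering (A lower-left, B lower-right, C upper-left, D upper-right, as
         in the diagram above) */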
      PetscInt rowA = j*X + i,     rowB = j*X + i+1;
      PetscInt rowC = (j+1)*X + i, rowD = (j+1)*X + i+1;

      /* Lower triangle */
      er[k+0] = rowA; em[m+0*nl+0] =  1.0; em[m+0*nl+1] = -0.5; em[m+0*nl+2] = -0.5;
      er[k+1] = rowB; em[m+1*nl+0] = -0.5; em[m+1*nl+1] =  0.5; em[m+1*nl+2] =  0.0;
      er[k+2] = rowC; em[m+2*nl+0] = -0.5; em[m+2*nl+1] =  0.0; em[m+2*nl+2] =  0.5;
      k += nl; m += nl*nl;
      /* Upper triangle */
      er[k+0] = rowD; em[m+0*nl+0] =  1.0; em[m+0*nl+1] = -0.5; em[m+0*nl+2] = -0.5;
      er[k+1] = rowC; em[m+1*nl+0] = -0.5; em[m+1*nl+1] =  0.5; em[m+1*nl+2] =  0.0;
      er[k+2] = rowB; em[m+2*nl+0] = -0.5; em[m+2*nl+1] =  0.0; em[m+2*nl+2] =  0.5;
      k += nl; m += nl*nl;
    }
  }
  ierr = PetscLogEventEnd(integrationEvent,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc, char **argv)
{
  KSP            ksp;
  MatNullSpace   nullsp;
  DM             dm;
  Mat            A;
  Vec            x, b;
  PetscViewer    viewer;
  PetscInt       Nl, Ne;
  PetscInt      *elemRows;
  PetscScalar   *elemMats;
  PetscBool      doView = PETSC_TRUE;
  PetscErrorCode ierr;
#if defined(PETSC_USE_LOG)
  PetscLogStage  gpuStage, cpuStage;
#endif
  ierr = PetscInitialize(&argc, &argv, 0, help);if (ierr) return ierr;
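  /* The negative grid sizes (-3) mean the defaults may be overridden from the
     command line with -da_grid_x <M> and -da_grid_y <N> */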
  ierr = DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DMDA_STENCIL_BOX, -3, -3, PETSC_DECIDE, PETSC_DECIDE, 1, 1, NULL, NULL, &dm);CHKERRQ(ierr);
  ierr = DMSetFromOptions(dm);CHKERRQ(ierr);
  ierr = DMSetUp(dm);CHKERRQ(ierr);
  ierr = IntegrateCells(dm, &Ne, &Nl, &elemRows, &elemMats);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL, "-view", &doView, NULL);CHKERRQ(ierr);
  /* Construct matrix using the GPU */
  ierr = PetscLogStageRegister("GPU Stage", &gpuStage);CHKERRQ(ierr);
  ierr = PetscLogStagePush(gpuStage);CHKERRQ(ierr);
  ierr = DMSetMatType(dm,MATAIJ);CHKERRQ(ierr);
  ierr = DMCreateMatrix(dm, &A);CHKERRQ(ierr);
  ierr = MatSetType(A, MATAIJCUSP);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(A, 0, NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A, 0, NULL, 0, NULL);CHKERRQ(ierr);
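  /* MatSetValuesBatch() adds (ADD_VALUES) all Ne square element blocks in one
     call; each block is Nl x Nl, stored contiguously in elemMats, and its Nl
     indices in elemRows address both the rows and the columns of the block.
     Batching the insertions is what allows a GPU backend to do the aggregation. */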
  ierr = MatSetValuesBatch(A, Ne, Nl, elemRows, elemMats);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
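  /* For large problems, view only summary information rather than every entry */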
  if (doView) {
    ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD, NULL, &viewer);CHKERRQ(ierr);
    if (Ne > 500) {ierr = PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);}
    ierr = MatView(A, viewer);CHKERRQ(ierr);
    if (Ne > 500) {ierr = PetscViewerPopFormat(viewer);CHKERRQ(ierr);}
    ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  }
  ierr = PetscLogStagePop();CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  /* Construct the same matrix using the CPU for comparison */
  ierr = PetscLogStageRegister("CPU Stage", &cpuStage);CHKERRQ(ierr);
  ierr = PetscLogStagePush(cpuStage);CHKERRQ(ierr);
  ierr = DMSetMatType(dm,MATAIJ);CHKERRQ(ierr);
  ierr = DMCreateMatrix(dm, &A);CHKERRQ(ierr);
  ierr = MatZeroEntries(A);CHKERRQ(ierr);
  ierr = MatSetValuesBatch(A, Ne, Nl, elemRows, elemMats);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  if (doView) {
    ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD, NULL, &viewer);CHKERRQ(ierr);
    if (Ne > 500) {ierr = PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);}
    ierr = MatView(A, viewer);CHKERRQ(ierr);
    if (Ne > 500) {ierr = PetscViewerPopFormat(viewer);CHKERRQ(ierr);}
    ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  }
  ierr = PetscLogStagePop();CHKERRQ(ierr);
  /* Solve a simple system with a random rhs */
  ierr = MatCreateVecs(A, &x, &b);CHKERRQ(ierr);
  ierr = VecSetRandom(b, NULL);CHKERRQ(ierr);
  ierr = KSPCreate(PETSC_COMM_WORLD, &ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp, A, A);CHKERRQ(ierr);
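  /* The pure Neumann Poisson operator is singular: the constant vector lies in
     its null space. Attaching the constant null space to A lets KSP project it
     out during the solve. */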
  ierr = MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, NULL, &nullsp);CHKERRQ(ierr);
  ierr = MatSetNullSpace(A, nullsp);CHKERRQ(ierr);
  ierr = MatNullSpaceDestroy(&nullsp);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  ierr = KSPSolve(ksp, b, x);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr);
  /* Solve the physical system:

       -\Delta u = -6 (x + y - 1)

     where u = x^3 - 3/2 x^2 + y^3 - 3/2 y^2 + 1/2,
     so \Delta u = 6x - 3 + 6y - 3 = 6 (x + y - 1),
     and \frac{\partial u}{\partial n} = (3x (x - 1), 3y (y - 1)) \cdot n,
     which is \pm 3x (x - 1) = 0 on x = 0,1 and \pm 3y (y - 1) = 0 on y = 0,1,
     so the homogeneous Neumann condition holds on the whole boundary.
  */
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFree2(elemRows, elemMats);CHKERRQ(ierr);
  ierr = DMDestroy(&dm);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}