Actual source code: ex21.c
petsc-3.6.4 2016-04-12
static char help[] ="Solves a time-dependent nonlinear PDE with lower and upper bounds on the interior grid points. Uses implicit\n\
timestepping. Runtime options include:\n\
    -M <xg>, where <xg> = number of grid points\n\
    -debug : Activate debugging printouts\n\
    -nox   : Deactivate x-window graphics\n\
    -ul    : lower bound\n\
    -uh    : upper bound\n\n";

/*
   Concepts: TS^time-dependent nonlinear problems
   Concepts: TS^Variational inequality nonlinear solver
   Processors: n
*/
/* ------------------------------------------------------------------------

   This is a variation of ex2.c to solve the PDE

               u * u_xx
         u_t = ---------
               2*(t+1)^2

   with box constraints on the interior grid points
         ul <= u(t,x) <= uh with x != 0,1
   on the domain 0 <= x <= 1, with boundary conditions
         u(t,0) = t + 1, u(t,1) = 2*t + 2,
   and initial condition
         u(0,x) = 1 + x*x.

   The exact solution is:
         u(t,x) = (1 + x*x) * (1 + t)

   We use by default the backward Euler method.

   ------------------------------------------------------------------------- */
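/*
   Consistency check (editorial note, derived from the formulas above):
   with u(t,x) = (1 + x*x)*(1 + t) we have
       u_t  = 1 + x*x,
       u_xx = 2*(1 + t),
   so  u * u_xx / (2*(t+1)^2) = (1 + x*x)*(1 + t) * 2*(1 + t) / (2*(t+1)^2)
                              = 1 + x*x = u_t,
   and u(t,0) = 1 + t, u(t,1) = 2*t + 2, u(0,x) = 1 + x*x, matching the
   boundary and initial conditions stated above.
*/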
/*
   Include "petscts.h" to use the PETSc timestepping routines. Note that
   this file automatically includes "petscsys.h" and other lower-level
   PETSc include files.

   Include the "petscdmda.h" to allow us to use the distributed array data
   structures to manage the parallel grid.
*/
#include <petscts.h>
#include <petscdm.h>
#include <petscdmda.h>
#include <petscdraw.h>

/*
   User-defined application context - contains data needed by the
   application-provided callback routines.
*/
typedef struct {
  MPI_Comm  comm;      /* communicator */
  DM        da;        /* distributed array data structure */
  Vec       localwork; /* local ghosted work vector */
  Vec       u_local;   /* local ghosted approximate solution vector */
  Vec       solution;  /* global exact solution vector */
  PetscInt  m;         /* total number of grid points */
  PetscReal h;         /* mesh width: h = 1/(m-1) */
  PetscBool debug;     /* flag (1 indicates activation of debugging printouts) */
} AppCtx;

/*
   User-defined routines, provided below.
*/
extern PetscErrorCode InitialConditions(Vec,AppCtx*);
extern PetscErrorCode RHSFunction(TS,PetscReal,Vec,Vec,void*);
extern PetscErrorCode RHSJacobian(TS,PetscReal,Vec,Mat,Mat,void*);
extern PetscErrorCode Monitor(TS,PetscInt,PetscReal,Vec,void*);
extern PetscErrorCode ExactSolution(PetscReal,Vec,AppCtx*);
extern PetscErrorCode SetBounds(Vec,Vec,PetscScalar,PetscScalar,AppCtx*);
int main(int argc,char **argv)
{
  AppCtx      appctx;                 /* user-defined application context */
  TS          ts;                     /* timestepping context */
  Mat         A;                      /* Jacobian matrix data structure */
  Vec         u;                      /* approximate solution vector */
  Vec         r;                      /* residual vector */
  PetscInt    time_steps_max = 1000;  /* default max timesteps */
  PetscReal   dt;
  PetscReal   time_total_max = 100.0; /* default max total time */
  Vec         xl,xu;                  /* Lower and upper bounds on variables */
  PetscScalar ul = 0.0,uh = 3.0;
  PetscBool   mymonitor;
  PetscReal   bounds[] = {1.0, 3.3};

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscInitialize(&argc,&argv,(char*)0,help);
  PetscViewerDrawSetBounds(PETSC_VIEWER_DRAW_(PETSC_COMM_WORLD),1,bounds);

  appctx.comm = PETSC_COMM_WORLD;
  appctx.m    = 60;
  PetscOptionsGetInt(NULL,"-M",&appctx.m,NULL);
  PetscOptionsGetScalar(NULL,"-ul",&ul,NULL);
  PetscOptionsGetScalar(NULL,"-uh",&uh,NULL);
  PetscOptionsHasName(NULL,"-debug",&appctx.debug);
  PetscOptionsHasName(NULL,"-mymonitor",&mymonitor);
  appctx.h    = 1.0/(appctx.m-1.0);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern.  There are M
     total grid values spread equally among all the processors.
  */
  DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,appctx.m,1,1,NULL,&appctx.da);

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution.  Then duplicate these for remaining vectors that
     have the same types.
  */
  DMCreateGlobalVector(appctx.da,&u);
  DMCreateLocalVector(appctx.da,&appctx.u_local);

  /*
     Create local work vector for use in evaluating right-hand-side function;
     create global work vector for storing exact solution.
  */
  VecDuplicate(appctx.u_local,&appctx.localwork);
  VecDuplicate(u,&appctx.solution);

  /* Create residual vector */
  VecDuplicate(u,&r);
  /* Create lower and upper bound vectors */
  VecDuplicate(u,&xl);
  VecDuplicate(u,&xu);
  SetBounds(xl,xu,ul,uh,&appctx);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context; set callback routine for
     right-hand-side function evaluation.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSCreate(PETSC_COMM_WORLD,&ts);
  TSSetProblemType(ts,TS_NONLINEAR);
  TSSetRHSFunction(ts,r,RHSFunction,&appctx);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set optional user-defined monitoring routine
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  if (mymonitor) {
    TSMonitorSet(ts,Monitor,&appctx,NULL);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     For nonlinear problems, the user can provide a Jacobian evaluation
     routine (or use a finite differencing approximation).

     Create matrix data structure; set Jacobian evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,appctx.m,appctx.m);
  MatSetFromOptions(A);
  MatSetUp(A);
  TSSetRHSJacobian(ts,A,A,RHSJacobian,&appctx);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set solution vector and initial timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  dt = appctx.h/2.0;
  TSSetInitialTimeStep(ts,0.0,dt);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize timestepping solver:
       - Set the solution method to be the Backward Euler method.
       - Set timestepping duration info
     Then set runtime options, which can override these defaults.
     For example,
         -ts_max_steps <maxsteps> -ts_final_time <maxtime>
     to override the defaults set by TSSetDuration().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSSetType(ts,TSBEULER);
  TSSetDuration(ts,time_steps_max,time_total_max);
  /* Set lower and upper bound on the solution vector for each time step */
  TSVISetVariableBounds(ts,xl,xu);
  TSSetFromOptions(ts);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the problem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Evaluate initial conditions
  */
  InitialConditions(u,&appctx);

  /*
     Run the timestepping solver
  */
  TSSolve(ts,u);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  VecDestroy(&r);
  VecDestroy(&xl);
  VecDestroy(&xu);
  TSDestroy(&ts);
  VecDestroy(&u);
  MatDestroy(&A);
  DMDestroy(&appctx.da);
  VecDestroy(&appctx.localwork);
  VecDestroy(&appctx.solution);
  VecDestroy(&appctx.u_local);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}
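/*
   Example invocation (editorial note, illustrative only; assumes the compiled
   executable is named ex21 and that PETSc was configured with MPI):

       mpiexec -n 2 ./ex21 -M 120 -ul 0.0 -uh 3.0 -mymonitor

   -ul/-uh set the box constraints enforced through TSVISetVariableBounds()
   above, and -mymonitor activates the Monitor() routine defined below.
*/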
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u,AppCtx *appctx)
{
  PetscScalar *u_localptr,h = appctx->h,x;
  PetscInt    i,mybase,myend;

  /*
     Determine starting point of each processor's range of
     grid values.
  */
  VecGetOwnershipRange(u,&mybase,&myend);

  /*
     Get a pointer to vector data.
     - For default PETSc vectors, VecGetArray() returns a pointer to
       the data array.  Otherwise, the routine is implementation dependent.
     - You MUST call VecRestoreArray() when you no longer need access to
       the array.
     - Note that the Fortran interface to VecGetArray() differs from the
       C version.  See the users manual for details.
  */
  VecGetArray(u,&u_localptr);

  /*
     We initialize the solution array by simply writing the solution
     directly into the array locations.  Alternatively, we could use
     VecSetValues() or VecSetValuesLocal().
  */
  for (i=mybase; i<myend; i++) {
    x = h*(PetscReal)i; /* current location in global grid */
    u_localptr[i-mybase] = 1.0 + x*x;
  }

  /*
     Restore vector
  */
  VecRestoreArray(u,&u_localptr);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    PetscPrintf(appctx->comm,"initial guess vector\n");
    VecView(u,PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   SetBounds - Sets the lower and upper bounds on the interior points

   Input parameters:
   xl - vector of lower bounds
   xu - vector of upper bounds
   ul - constant lower bound for all variables
   uh - constant upper bound for all variables
   appctx - Application context
*/
PetscErrorCode SetBounds(Vec xl, Vec xu, PetscScalar ul, PetscScalar uh,AppCtx *appctx)
{
  PetscScalar *l,*u;
  PetscMPIInt rank,size;
  PetscInt    localsize;

  VecSet(xl,ul);
  VecSet(xu,uh);
  VecGetLocalSize(xl,&localsize);
  /* Use VecGetArray() (not VecGetArrayRead()) since we modify the entries below */
  VecGetArray(xl,&l);
  VecGetArray(xu,&u);

  MPI_Comm_rank(appctx->comm,&rank);
  MPI_Comm_size(appctx->comm,&size);
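  /*
     (Editorial note) The first and last grid points carry the Dirichlet
     boundary data u(t,0) and u(t,1), so they are excluded from the box
     constraints (cf. the help text: ul <= u <= uh only for x != 0,1).
     Setting their bounds to -infinity/+infinity on the owning processes
     leaves them unconstrained.
  */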
  if (!rank) {
    l[0] = -PETSC_INFINITY;
    u[0] =  PETSC_INFINITY;
  }
  if (rank == size-1) {
    l[localsize-1] = -PETSC_INFINITY;
    u[localsize-1] =  PETSC_INFINITY;
  }
  VecRestoreArray(xl,&l);
  VecRestoreArray(xu,&u);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   ExactSolution - Computes the exact solution at a given time.

   Input Parameters:
   t - current time
   solution - vector in which exact solution will be computed
   appctx - user-defined application context

   Output Parameter:
   solution - vector with the newly computed exact solution
*/
PetscErrorCode ExactSolution(PetscReal t,Vec solution,AppCtx *appctx)
{
  PetscScalar *s_localptr,h = appctx->h,x;
  PetscInt    i,mybase,myend;

  /*
     Determine starting and ending points of each processor's
     range of grid values
  */
  VecGetOwnershipRange(solution,&mybase,&myend);

  /*
     Get a pointer to vector data.
  */
  VecGetArray(solution,&s_localptr);

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  for (i=mybase; i<myend; i++) {
    x = h*(PetscReal)i;
    s_localptr[i-mybase] = (t + 1.0)*(1.0 + x*x);
  }

  /*
     Restore vector
  */
  VecRestoreArray(solution,&s_localptr);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   Monitor - User-provided routine to monitor the solution computed at
   each timestep.  This example plots the solution and computes the
   error in two different norms.

   Input Parameters:
   ts     - the timestep context
   step   - the count of the current step (with 0 meaning the
            initial condition)
   time   - the current time
   u      - the solution at this timestep
   ctx    - the user-provided context for this monitoring routine.
            In this case we use the application context which contains
            information about the problem size, workspace and the exact
            solution.
*/
PetscErrorCode Monitor(TS ts,PetscInt step,PetscReal time,Vec u,void *ctx)
{
  AppCtx    *appctx = (AppCtx*) ctx; /* user-defined application context */
  PetscReal en2,en2s,enmax;
  PetscDraw draw;

  /*
     We use the default X windows viewer
         PETSC_VIEWER_DRAW_(appctx->comm)
     that is associated with the current communicator.  This saves
     the effort of calling PetscViewerDrawOpen() to create the window.
     Note that if we wished to plot several items in separate windows we
     would create each viewer with PetscViewerDrawOpen() and store them in
     the application context, appctx.

     Double buffering makes graphics look better.
  */
  PetscViewerDrawGetDraw(PETSC_VIEWER_DRAW_(appctx->comm),0,&draw);
  PetscDrawSetDoubleBuffer(draw);
  VecView(u,PETSC_VIEWER_DRAW_(appctx->comm));

  /*
     Compute the exact solution at this timestep
  */
  ExactSolution(time,appctx->solution,appctx);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    PetscPrintf(appctx->comm,"Computed solution vector\n");
    VecView(u,PETSC_VIEWER_STDOUT_WORLD);
    PetscPrintf(appctx->comm,"Exact solution vector\n");
    VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
  }

  /*
     Compute the 2-norm and max-norm of the error
  */
  VecAXPY(appctx->solution,-1.0,u);
  VecNorm(appctx->solution,NORM_2,&en2);
  en2s = PetscSqrtReal(appctx->h)*en2;  /* scale the 2-norm by the grid spacing */
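  /*
     (Editorial note) The sqrt(h) factor turns the discrete 2-norm into an
     approximation of the continuous L2 norm of the error on [0,1]:
         ||e||_L2^2 ~= sum_i h*e_i^2 = h*||e||_2^2,  so  ||e||_L2 ~= sqrt(h)*||e||_2.
  */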
  VecNorm(appctx->solution,NORM_MAX,&enmax);

  /*
     PetscPrintf() causes only the first processor in this
     communicator to print the timestep information.
  */
  PetscPrintf(appctx->comm,"Timestep %D: time = %g, 2-norm error = %g, max norm error = %g\n",step,(double)time,(double)en2s,(double)enmax);

  /*
     Print debugging information if desired
  */
  /* if (appctx->debug) {
     PetscPrintf(appctx->comm,"Error vector\n");
     VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
   } */
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSFunction - User-provided routine that evaluates the right-hand-side
   function of the ODE.  This routine is set in the main program by
   calling TSSetRHSFunction().  We compute:
          global_out = F(global_in)

   Input Parameters:
   ts         - timestepping context
   t          - current time
   global_in  - vector containing the current iterate
   ctx        - (optional) user-provided context for function evaluation.
                In this case we use the appctx defined above.

   Output Parameter:
   global_out - vector containing the newly evaluated function
*/
PetscErrorCode RHSFunction(TS ts,PetscReal t,Vec global_in,Vec global_out,void *ctx)
{
  AppCtx            *appctx   = (AppCtx*) ctx;     /* user-defined application context */
  DM                da        = appctx->da;        /* distributed array */
  Vec               local_in  = appctx->u_local;   /* local ghosted input vector */
  Vec               localwork = appctx->localwork; /* local ghosted work vector */
  PetscErrorCode    ierr;
  PetscInt          i,localsize;
  PetscMPIInt       rank,size;
  PetscScalar       *copyptr,sc;
  const PetscScalar *localptr;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local function computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to local vector, using the 2-step process
        DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DMGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
  DMGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);

  /*
     Access directly the values in our local INPUT work array
  */
  VecGetArrayRead(local_in,&localptr);

  /*
     Access directly the values in our local OUTPUT work array
  */
  VecGetArray(localwork,&copyptr);

  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));

  /*
     Evaluate our function on the nodes owned by this processor
  */
  VecGetLocalSize(local_in,&localsize);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Handle boundary conditions: This is done by using the boundary condition
        u(t,boundary) = g(t,boundary)
     for some function g.  Now take the derivative with respect to t to obtain
        u_{t}(t,boundary) = g_{t}(t,boundary)

     In our case, u(t,0) = t + 1, so that u_{t}(t,0) = 1
     and u(t,1) = 2*t + 2, so that u_{t}(t,1) = 2
  */
  MPI_Comm_rank(appctx->comm,&rank);
  MPI_Comm_size(appctx->comm,&size);
  if (!rank)          copyptr[0]           = 1.0;
  if (rank == size-1) copyptr[localsize-1] = (t < .5) ? 2.0 : 0.0;
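  /*
     (Editorial note) For t >= .5 the time derivative at x = 1 is held at 0
     rather than the value 2 implied by the exact boundary data, so the
     computed solution stops tracking the exact solution u = (1 + x*x)*(1 + t)
     once the right boundary value reaches about 3.  This appears intended to
     make the upper bound (default uh = 3.0) set via TSVISetVariableBounds()
     become active.
  */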

  /*
     Handle the interior nodes where the PDE is replaced by finite
     difference operators.
  */
  for (i=1; i<localsize-1; i++) copyptr[i] = localptr[i] * sc * (localptr[i+1] + localptr[i-1] - 2.0*localptr[i]);
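  /*
     (Editorial note) This is the standard centered second difference applied
     to the PDE stated at the top of the file:
         u_xx ~= (u[i+1] - 2*u[i] + u[i-1]) / h^2,
     so with sc = 1/(h^2 * 2*(1+t)^2) each interior entry is
         F_i = sc * u[i] * (u[i+1] + u[i-1] - 2*u[i]) ~= u * u_xx / (2*(t+1)^2).
  */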

  /*
     Restore vectors
  */
  VecRestoreArrayRead(local_in,&localptr);
  VecRestoreArray(localwork,&copyptr);

  /*
     Insert values from the local OUTPUT vector into the global
     output vector
  */
  DMLocalToGlobalBegin(da,localwork,INSERT_VALUES,global_out);
  DMLocalToGlobalEnd(da,localwork,INSERT_VALUES,global_out);

  /* Print debugging information if desired */
  /* if (appctx->debug) {
     PetscPrintf(appctx->comm,"RHS function vector\n");
     VecView(global_out,PETSC_VIEWER_STDOUT_WORLD);
   } */

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSJacobian - User-provided routine to compute the Jacobian of
   the nonlinear right-hand-side function of the ODE.

   Input Parameters:
   ts        - the TS context
   t         - current time
   global_in - global input vector
   ctx       - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   AA - Jacobian matrix
   B  - optionally different matrix from which the preconditioner is built

   Notes:
   RHSJacobian computes entries for the locally owned part of the Jacobian.
   - Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.
   - Each processor needs to insert only elements that it owns
     locally (but any non-local elements will be sent to the
     appropriate processor during matrix assembly).
   - Always specify global row and columns of matrix entries when
     using MatSetValues().
   - Here, we set all entries for a particular row at once.
   - Note that MatSetValues() uses 0-based row and column numbers
     in Fortran as well as in C.
*/
PetscErrorCode RHSJacobian(TS ts,PetscReal t,Vec global_in,Mat AA,Mat B,void *ctx)
{
  AppCtx            *appctx  = (AppCtx*)ctx;    /* user-defined application context */
  Vec               local_in = appctx->u_local; /* local ghosted input vector */
  DM                da       = appctx->da;      /* distributed array */
  PetscScalar       v[3],sc;
  const PetscScalar *localptr;
  PetscErrorCode    ierr;
  PetscInt          i,mstart,mend,mstarts,mends,idx[3],is;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local Jacobian computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to local vector, using the 2-step process
        DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DMGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
  DMGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);

  /*
     Get pointer to vector data
  */
  VecGetArrayRead(local_in,&localptr);

  /*
     Get starting and ending locally owned rows of the matrix
  */
  MatGetOwnershipRange(B,&mstarts,&mends);
  mstart = mstarts; mend = mends;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part of the Jacobian.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Here, we set all entries for a particular row at once.
      - We can set matrix entries using either MatSetValuesLocal()
        or MatSetValues().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Set matrix rows corresponding to boundary data
  */
  if (mstart == 0) {
    v[0] = 0.0;
    MatSetValues(B,1,&mstart,1,&mstart,v,INSERT_VALUES);
    mstart++;
  }
  if (mend == appctx->m) {
    mend--;
    v[0] = 0.0;
    MatSetValues(B,1,&mend,1,&mend,v,INSERT_VALUES);
  }

  /*
     Set matrix rows corresponding to interior data.  We construct the
     matrix one row at a time.
  */
  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));
  for (i=mstart; i<mend; i++) {
    idx[0] = i-1; idx[1] = i; idx[2] = i+1;
    is     = i - mstart + 1;
    v[0]   = sc*localptr[is];
    v[1]   = sc*(localptr[is+1] + localptr[is-1] - 4.0*localptr[is]);
    v[2]   = sc*localptr[is];
    MatSetValues(B,1,&i,3,idx,v,INSERT_VALUES);
  }
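  /*
     (Editorial note) These stencil values are the derivatives of the interior
     residual F_i = sc*u[i]*(u[i+1] + u[i-1] - 2*u[i]) used in RHSFunction():
         dF_i/du[i-1] = sc*u[i],
         dF_i/du[i]   = sc*(u[i+1] + u[i-1] - 4*u[i]),
         dF_i/du[i+1] = sc*u[i],
     which are exactly v[0], v[1], v[2] above.
  */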

  /*
     Restore vector
  */
  VecRestoreArrayRead(local_in,&localptr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Complete the matrix assembly process and set some options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);

  /*
     Set an option to indicate that we will never add a new nonzero location
     to the matrix.  If we do, it will generate an error.
  */
  MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);

  return 0;
}