Actual source code: ex8.c
static char help[] = "Demonstrates using a local ordering to set values into a parallel vector.\n\n";
/*
  Include "petscvec.h" so that we can use vectors. Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscis.h     - index sets
     petscviewer.h - viewers
*/
#include <petscvec.h>
int main(int argc,char **argv)
{
  PetscMPIInt rank;
  PetscInt    i,ng,*gindices,rstart,rend,M;
  PetscScalar one = 1.0;
  Vec         x;

  PetscInitialize(&argc,&argv,(char*)0,help);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  /*
     Create a parallel vector.
      - In this case, we specify the size of each processor's local
        portion and let PETSc compute the global size. Alternatively,
        PETSc could determine the vector's distribution if we specified
        just the global size; a sketch of that variant follows the code
        below.
  */
  VecCreate(PETSC_COMM_WORLD,&x);
  VecSetSizes(x,rank+1,PETSC_DECIDE);
  VecSetFromOptions(x);
  VecSet(x,one);
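
  /*
     Illustration only (not part of the original example): had we wanted
     PETSc to choose the distribution instead, we would pass PETSC_DECIDE
     as the local size along with a global size N of our choosing
     (N is a hypothetical variable introduced here for illustration):

        VecSetSizes(x,PETSC_DECIDE,N);
  */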
  /*
     Set the local-to-global ordering for the vector. Each processor
     generates a list of the global indices for each local index. Note that
     the local indices are just whatever is convenient for a particular
     application. In this case we treat the vector as lying on a
     one-dimensional grid and place one ghost point on each end of the block
     owned by each processor; a small worked example follows the code below.
  */
  VecGetSize(x,&M);
  VecGetOwnershipRange(x,&rstart,&rend);
  ng   = rend - rstart + 2;
  PetscMalloc1(ng,&gindices);
  gindices[0] = rstart - 1;
  for (i=0; i<ng-1; i++) gindices[i+1] = gindices[i] + 1;

  /* map the first and last points as periodic */
  if (gindices[0] == -1)   gindices[0]    = M - 1;
  if (gindices[ng-1] == M) gindices[ng-1] = 0;
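
  /*
     Worked example (added for illustration): run on two processes, rank 0
     owns entry 0 and rank 1 owns entries 1-2, so M = 3. Before the periodic
     wrap, rank 0 builds gindices = {-1,0,1} and rank 1 builds {0,1,2,3};
     after it they become {2,0,1} and {0,1,2,0} respectively, so each ghost
     point refers to the neighboring block across the periodic boundary.
  */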
  {
    ISLocalToGlobalMapping ltog;
    ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,1,ng,gindices,PETSC_COPY_VALUES,&ltog);
    VecSetLocalToGlobalMapping(x,ltog);
    ISLocalToGlobalMappingDestroy(&ltog);
  }
  PetscFree(gindices);
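
  /*
     Aside (a sketch, not part of the original example): while the mapping
     is still alive, local indices can also be translated to global ones
     explicitly, e.g.

        PetscInt lidx = 0,gidx;
        ISLocalToGlobalMappingApply(ltog,1,&lidx,&gidx);

     which here would return the global index of the left ghost point.
  */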
  /*
     Set the vector elements.
      - In this case we set the values using the local ordering.
      - Each processor can contribute any vector entries,
        regardless of which processor "owns" them; any nonlocal
        contributions will be transferred to the appropriate processor
        during the assembly process.
      - In this example, the flag ADD_VALUES indicates that all
        contributions will be added together.
  */
  for (i=0; i<ng; i++) {
    VecSetValuesLocal(x,1,&i,&one,ADD_VALUES);
  }
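
  /*
     For comparison (illustration only): had we kept gindices around, the
     same contribution for local index i could be made in the global
     ordering with

        VecSetValues(x,1,&gindices[i],&one,ADD_VALUES);

     VecSetValuesLocal() in effect performs this translation for us through
     the mapping attached above.
  */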
  /*
     Assemble vector, using the 2-step process:
        VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  VecAssemblyBegin(x);
  VecAssemblyEnd(x);
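
  /*
     Sanity check (derived by hand, added for illustration): each entry
     starts at 1 from VecSet(), gains 1 from its owner's interior local
     index, and gains 1 more for every neighboring process whose ghost
     point refers to it. With 4 processes (local sizes 1,2,3,4, so M = 10)
     the assembled vector should therefore read

        4 3 3 3 2 3 3 2 2 3

     with entry 0 touched by both of rank 0's neighbors through the
     periodic wrap.
  */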
  /*
     View the vector; then destroy it.
  */
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
  VecDestroy(&x);

  PetscFinalize();
  return 0;
}
/*TEST

   test:
      nsize: 4

TEST*/