Actual source code: vecmpitoseq.c
petsc-3.11.4 2019-09-28
#include <petsc/private/vecimpl.h>
/*@C
   VecScatterCreateToAll - Creates a vector and a scatter context that copies all
   vector values to each processor

   Collective on Vec

   Input Parameter:
.  vin - input MPIVEC

   Output Parameters:
+  ctx - scatter context
-  vout - output SEQVEC that is large enough to scatter into

   Level: intermediate

   Note: vout may be NULL [PETSC_NULL_VEC from Fortran] if you do not
   need to have it created

   Usage:
$        VecScatterCreateToAll(vin,&ctx,&vout);
$
$        // scatter as many times as you need
$        VecScatterBegin(ctx,vin,vout,INSERT_VALUES,SCATTER_FORWARD);
$        VecScatterEnd(ctx,vin,vout,INSERT_VALUES,SCATTER_FORWARD);
$
$        // destroy scatter context and local vector when no longer needed
$        VecScatterDestroy(&ctx);
$        VecDestroy(&vout);

   Do NOT create a vector and then pass it in as the final argument vout! vout is created by this
   routine automatically (unless you pass NULL for that argument because you do not need it).

.seealso: VecScatterCreate(), VecScatterCreateToZero(), VecScatterBegin(), VecScatterEnd()

@*/
PetscErrorCode  VecScatterCreateToAll(Vec vin,VecScatter *ctx,Vec *vout)
{
  PetscErrorCode ierr;
  PetscInt       N;
  IS             is;
  Vec            tmp;
  Vec            *tmpv;
  PetscBool      tmpvout = PETSC_FALSE;

  PetscFunctionBegin;
  if (vout) {
    tmpv = vout;
  } else {
    tmpvout = PETSC_TRUE;
    tmpv    = &tmp;
  }

  /* Create a sequential vector on each process, with the same size as the original MPI vector */
  ierr = VecGetSize(vin,&N);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF,N,tmpv);CHKERRQ(ierr);
  /* Create the VecScatter ctx with the communication info */
  ierr = ISCreateStride(PETSC_COMM_SELF,N,0,1,&is);CHKERRQ(ierr);
  ierr = VecScatterCreate(vin,is,*tmpv,is,ctx);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  if (tmpvout) {ierr = VecDestroy(tmpv);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}
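
/*
   A minimal usage sketch, assuming an existing parallel Vec x (the names x, gather, xall,
   a, and N are placeholders): gather all entries of x onto every process and read them
   locally through the sequential copy.

     VecScatter        gather;
     Vec               xall;
     const PetscScalar *a;
     PetscInt          N;
     PetscErrorCode    ierr;

     ierr = VecScatterCreateToAll(x,&gather,&xall);CHKERRQ(ierr);
     ierr = VecScatterBegin(gather,x,xall,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = VecScatterEnd(gather,x,xall,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = VecGetSize(x,&N);CHKERRQ(ierr);
     ierr = VecGetArrayRead(xall,&a);CHKERRQ(ierr);      // every rank now sees a[0..N-1]
     ierr = PetscPrintf(PETSC_COMM_SELF,"first entry %g\n",(double)PetscRealPart(a[0]));CHKERRQ(ierr);
     ierr = VecRestoreArrayRead(xall,&a);CHKERRQ(ierr);
     ierr = VecScatterDestroy(&gather);CHKERRQ(ierr);
     ierr = VecDestroy(&xall);CHKERRQ(ierr);
*/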
/*@C
   VecScatterCreateToZero - Creates an output vector and a scatter context used to
   copy all vector values into the output vector on the zeroth processor

   Collective on Vec

   Input Parameter:
.  vin - input MPIVEC

   Output Parameters:
+  ctx - scatter context
-  vout - output SEQVEC that is large enough to scatter into on processor 0 and
          of length zero on all other processors

   Level: intermediate

   Note: vout may be NULL [PETSC_NULL_VEC from Fortran] if you do not
   need to have it created

   Usage:
$        VecScatterCreateToZero(vin,&ctx,&vout);
$
$        // scatter as many times as you need
$        VecScatterBegin(ctx,vin,vout,INSERT_VALUES,SCATTER_FORWARD);
$        VecScatterEnd(ctx,vin,vout,INSERT_VALUES,SCATTER_FORWARD);
$
$        // destroy scatter context and local vector when no longer needed
$        VecScatterDestroy(&ctx);
$        VecDestroy(&vout);

   Do NOT create a vector and then pass it in as the final argument vout! vout is created by this
   routine automatically (unless you pass NULL for that argument because you do not need it).

.seealso: VecScatterCreate(), VecScatterCreateToAll(), VecScatterBegin(), VecScatterEnd()

@*/
PetscErrorCode  VecScatterCreateToZero(Vec vin,VecScatter *ctx,Vec *vout)
{
  PetscErrorCode ierr;
  PetscInt       N;
  PetscMPIInt    rank;
  IS             is;
  Vec            tmp;
  Vec            *tmpv;
  PetscBool      tmpvout = PETSC_FALSE;

  PetscFunctionBegin;
  if (vout) {
    tmpv = vout;
  } else {
    tmpvout = PETSC_TRUE;
    tmpv    = &tmp;
  }

  /* Create a sequential vector on each process, with the same size as the original MPI vector on process 0 and size zero elsewhere */
  ierr = VecGetSize(vin,&N);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)vin),&rank);CHKERRQ(ierr);
  if (rank) N = 0;
  ierr = VecCreateSeq(PETSC_COMM_SELF,N,tmpv);CHKERRQ(ierr);
  /* Create the VecScatter ctx with the communication info */
  ierr = ISCreateStride(PETSC_COMM_SELF,N,0,1,&is);CHKERRQ(ierr);
  ierr = VecScatterCreate(vin,is,*tmpv,is,ctx);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  if (tmpvout) {ierr = VecDestroy(tmpv);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}
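
/*
   A minimal usage sketch, assuming an existing parallel Vec x (the names x, tozero, xseq,
   a, i, n, and rank are placeholders): gather all values onto process 0, modify them there,
   and push them back into the parallel vector with a reverse scatter.

     VecScatter     tozero;
     Vec            xseq;
     PetscScalar    *a;
     PetscInt       i,n;
     PetscMPIInt    rank;
     PetscErrorCode ierr;

     ierr = VecScatterCreateToZero(x,&tozero,&xseq);CHKERRQ(ierr);
     ierr = VecScatterBegin(tozero,x,xseq,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = VecScatterEnd(tozero,x,xseq,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)x),&rank);CHKERRQ(ierr);
     if (!rank) {                                    // xseq has length zero on the other ranks
       ierr = VecGetLocalSize(xseq,&n);CHKERRQ(ierr);
       ierr = VecGetArray(xseq,&a);CHKERRQ(ierr);
       for (i=0; i<n; i++) a[i] = 2.0*a[i];
       ierr = VecRestoreArray(xseq,&a);CHKERRQ(ierr);
     }
     // the vector arguments are swapped for SCATTER_REVERSE
     ierr = VecScatterBegin(tozero,xseq,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
     ierr = VecScatterEnd(tozero,xseq,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
     ierr = VecScatterDestroy(&tozero);CHKERRQ(ierr);
     ierr = VecDestroy(&xseq);CHKERRQ(ierr);
*/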
#include <petscsf.h>
/*@
  PetscSFCreateFromZero - Create a PetscSF that maps a sequential Vec stored on process 0 to a distributed Vec

  Input Parameters:
+ comm - the communicator for the new PetscSF
- gv   - a distributed Vec

  Output Parameter:
. sf   - the SF created, mapping a sequential Vec on process 0 to gv

  Level: developer

.seealso: DMPlexDistributedToSequential()
@*/
PetscErrorCode PetscSFCreateFromZero(MPI_Comm comm, Vec gv, PetscSF *sf)
{
  PetscSFNode    *remotenodes;
  PetscInt       *localnodes;
  PetscInt        N, n, start, numroots, l;
  PetscMPIInt     rank;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscSFCreate(comm, sf);CHKERRQ(ierr);
  ierr = VecGetSize(gv, &N);CHKERRQ(ierr);
  ierr = VecGetLocalSize(gv, &n);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(gv, &start, NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = PetscMalloc1(n, &localnodes);CHKERRQ(ierr);
  ierr = PetscMalloc1(n, &remotenodes);CHKERRQ(ierr);
  if (!rank) numroots = N;
  else       numroots = 0;
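  /* Each local entry of gv becomes a leaf that points at the matching root entry; all of the
     roots (the entries of the sequential Vec) are owned by process 0 */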
  for (l = 0; l < n; ++l) {
    localnodes[l]        = l;
    remotenodes[l].rank  = 0;
    remotenodes[l].index = l+start;
  }
  ierr = PetscSFSetGraph(*sf, numroots, n, localnodes, PETSC_OWN_POINTER, remotenodes, PETSC_OWN_POINTER);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
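
/*
   A minimal usage sketch, assuming a sequential Vec sv holding all N entries on process 0
   and of length zero elsewhere (for example one produced by VecScatterCreateToZero), and a
   distributed Vec gv; the names sv, gv, sf, sarray, and garray are placeholders.  The
   created SF broadcasts the root values from process 0 into the leaves, i.e. the local
   entries of gv.

     PetscSF           sf;
     const PetscScalar *sarray;
     PetscScalar       *garray;
     PetscErrorCode    ierr;

     ierr = PetscSFCreateFromZero(PetscObjectComm((PetscObject)gv),gv,&sf);CHKERRQ(ierr);
     ierr = VecGetArrayRead(sv,&sarray);CHKERRQ(ierr);
     ierr = VecGetArray(gv,&garray);CHKERRQ(ierr);
     ierr = PetscSFBcastBegin(sf,MPIU_SCALAR,sarray,garray);CHKERRQ(ierr);
     ierr = PetscSFBcastEnd(sf,MPIU_SCALAR,sarray,garray);CHKERRQ(ierr);
     ierr = VecRestoreArray(gv,&garray);CHKERRQ(ierr);
     ierr = VecRestoreArrayRead(sv,&sarray);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
*/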