Actual source code: vscatfce.c
petsc-3.13.6 2020-09-29
1: #include <petsc/private/vecscatterimpl.h>
2: #if defined(PETSC_HAVE_CUDA)
3: #include <petsc/private/cudavecimpl.h>
4: #endif
5: /* ------------------------------------------------------------------*/
6: /*@
7: VecScatterGetMerged - Returns true if the scatter is completed in the call to VecScatterBegin()
8: and VecScatterEnd() does nothing
10: Not Collective
12: Input Parameter:
13: . ctx - scatter context created with VecScatterCreate()
15: Output Parameter:
16: . flg - PETSC_TRUE if the work of VecScatterBegin() and VecScatterEnd() is all done during VecScatterBegin()
18: Level: developer
20: .seealso: VecScatterCreate(), VecScatterEnd(), VecScatterBegin()
21: @*/
22: PetscErrorCode VecScatterGetMerged(VecScatter ctx,PetscBool *flg)
23: {
26: *flg = ctx->beginandendtogether;
27: return(0);
28: }
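/*
   Editorial usage sketch (not part of the PETSc source): overlap independent local
   work with the communication only when VecScatterBegin() and VecScatterEnd() are
   not merged.  The helper name and the commented-out DoLocalWork() are hypothetical.
*/
static PetscErrorCode ExampleOverlapIfNotMerged(VecScatter ctx,Vec x,Vec y)
{
  PetscBool merged;

  VecScatterGetMerged(ctx,&merged);
  VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
  if (!merged) {
    /* communication may still be in flight; do work here that does not need y */
    /* DoLocalWork(); */
  }
  VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
  return(0);
}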
30: /*@
31: VecScatterBegin - Begins a generalized scatter from one vector to
32: another. Complete the scattering phase with VecScatterEnd().
34: Neighbor-wise Collective on VecScatter
36: Input Parameters:
37: + ctx - scatter context generated by VecScatterCreate()
38: . x - the vector from which we scatter
39: . y - the vector to which we scatter
40: . addv - either ADD_VALUES, MAX_VALUES, MIN_VALUES or INSERT_VALUES; with INSERT_VALUES mode, any location
41: not scattered to retains its old value, i.e. the vector is NOT first zeroed.
42: - mode - the scattering mode, usually SCATTER_FORWARD. The available modes are:
43: SCATTER_FORWARD or SCATTER_REVERSE
46: Level: intermediate
48: Options Database: See VecScatterCreate()
50: Notes:
51: The vectors x and y need not be the same vectors used in the call
52: to VecScatterCreate(), but x must have the same parallel data layout
53: as that passed in as the x to VecScatterCreate(), similarly for the y.
54: Most likely they have been obtained from VecDuplicate().
56: You cannot change the values in the input vector between the calls to VecScatterBegin()
57: and VecScatterEnd().
59: If you use SCATTER_REVERSE the two arguments x and y should be reversed relative to
60: the SCATTER_FORWARD call.
62: y[iy[i]] = x[ix[i]], for i=0,...,ni-1
64: This scatter is far more general than the conventional
65: scatter, since it can be a gather or a scatter or a combination,
66: depending on the indices ix and iy. If x is a parallel vector and y
67: is sequential, VecScatterBegin() can serve to gather values to a
68: single processor. Similarly, if y is parallel and x sequential, the
69: routine can scatter from one processor to many processors.
72: .seealso: VecScatterCreate(), VecScatterEnd()
73: @*/
74: PetscErrorCode VecScatterBegin(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
75: {
77: #if defined(PETSC_USE_DEBUG)
78: PetscInt to_n,from_n;
79: #endif
84: if (ctx->inuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");
86: #if defined(PETSC_USE_DEBUG)
87: /*
88: Error checking to make sure these vectors match the vectors used
89: to create the vector scatter context. -1 in the from_n and to_n indicate the
90: vector lengths are unknown (for example with mapped scatters) and thus
91: no error checking is performed.
92: */
93: if (ctx->from_n >= 0 && ctx->to_n >= 0) {
94: VecGetLocalSize(x,&from_n);
95: VecGetLocalSize(y,&to_n);
96: if (mode & SCATTER_REVERSE) {
97: if (to_n != ctx->from_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter reverse and vector to != ctx from size)",to_n,ctx->from_n);
98: if (from_n != ctx->to_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter reverse and vector from != ctx to size)",from_n,ctx->to_n);
99: } else {
100: if (to_n != ctx->to_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter forward and vector to != ctx to size)",to_n,ctx->to_n);
101: if (from_n != ctx->from_n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter forward and vector from != ctx from size)",from_n,ctx->from_n);
102: }
103: }
104: #endif
106: ctx->inuse = PETSC_TRUE;
107: PetscLogEventBegin(VEC_ScatterBegin,ctx,x,y,0);
108: (*ctx->ops->begin)(ctx,x,y,addv,mode);
109: if (ctx->beginandendtogether && ctx->ops->end) {
110: ctx->inuse = PETSC_FALSE;
111: (*ctx->ops->end)(ctx,x,y,addv,mode);
112: }
113: PetscLogEventEnd(VEC_ScatterBegin,ctx,x,y,0);
114: return(0);
115: }
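/*
   Editorial usage sketch (not part of the PETSc source): gather two global entries of a
   parallel vector into a small sequential vector on every process, illustrating that a
   parallel-to-sequential scatter acts as a gather.  All object names are hypothetical.
*/
static PetscErrorCode ExampleForwardGather(void)
{
  Vec        x,y;
  IS         ix,iy;
  VecScatter ctx;

  VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,10,&x);
  VecSet(x,1.0);
  VecCreateSeq(PETSC_COMM_SELF,2,&y);
  ISCreateStride(PETSC_COMM_SELF,2,0,1,&ix);   /* global indices 0,1 of x */
  ISCreateStride(PETSC_COMM_SELF,2,0,1,&iy);   /* local indices 0,1 of y  */
  VecScatterCreate(x,ix,y,iy,&ctx);
  VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterDestroy(&ctx);
  ISDestroy(&ix);
  ISDestroy(&iy);
  VecDestroy(&x);
  VecDestroy(&y);
  return(0);
}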
117: /* --------------------------------------------------------------------*/
118: /*@
119: VecScatterEnd - Ends a generalized scatter from one vector to another. Call
120: after first calling VecScatterBegin().
122: Neighbor-wise Collective on VecScatter
124: Input Parameters:
125: + ctx - scatter context generated by VecScatterCreate()
126: . x - the vector from which we scatter
127: . y - the vector to which we scatter
128: . addv - one of ADD_VALUES, MAX_VALUES, MIN_VALUES or INSERT_VALUES
129: - mode - the scattering mode, usually SCATTER_FORWARD. The available modes are:
130: SCATTER_FORWARD, SCATTER_REVERSE
132: Level: intermediate
134: Notes:
135: If you use SCATTER_REVERSE the arguments x and y should be reversed relative to the SCATTER_FORWARD call.
137: y[iy[i]] = x[ix[i]], for i=0,...,ni-1
139: .seealso: VecScatterBegin(), VecScatterCreate()
140: @*/
141: PetscErrorCode VecScatterEnd(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
142: {
149: ctx->inuse = PETSC_FALSE;
150: if (!ctx->ops->end) return(0);
151: if (!ctx->beginandendtogether) {
152: PetscLogEventBegin(VEC_ScatterEnd,ctx,x,y,0);
153: (*(ctx)->ops->end)(ctx,x,y,addv,mode);
154: PetscLogEventEnd(VEC_ScatterEnd,ctx,x,y,0);
155: }
156: return(0);
157: }
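/*
   Editorial usage sketch (not part of the PETSc source): with a context built for a
   forward scatter from x to y, the same context accumulates values from y back into x
   when the vector arguments are swapped and SCATTER_REVERSE is used.
*/
static PetscErrorCode ExampleReverseScatter(VecScatter ctx,Vec x,Vec y)
{
  VecScatterBegin(ctx,y,x,ADD_VALUES,SCATTER_REVERSE);
  VecScatterEnd(ctx,y,x,ADD_VALUES,SCATTER_REVERSE);
  return(0);
}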
159: /*@
160: VecScatterDestroy - Destroys a scatter context created by VecScatterCreate()
162: Collective on VecScatter
164: Input Parameter:
165: . ctx - the scatter context
167: Level: intermediate
169: .seealso: VecScatterCreate(), VecScatterCopy()
170: @*/
171: PetscErrorCode VecScatterDestroy(VecScatter *ctx)
172: {
176: if (!*ctx) return(0);
178: if ((*ctx)->inuse && ((PetscObject)(*ctx))->refct == 1) SETERRQ(((PetscObject)(*ctx))->comm,PETSC_ERR_ARG_WRONGSTATE,"Scatter context is in use");
179: if (--((PetscObject)(*ctx))->refct > 0) {*ctx = 0; return(0);}
181: /* if memory was published with SAWs then destroy it */
182: PetscObjectSAWsViewOff((PetscObject)(*ctx));
183: if ((*ctx)->ops->destroy) {(*(*ctx)->ops->destroy)(*ctx);}
184: #if defined(PETSC_HAVE_CUDA)
185: VecScatterCUDAIndicesDestroy((PetscCUDAIndices*)&((*ctx)->spptr));
186: #endif
187: PetscHeaderDestroy(ctx);
188: return(0);
189: }
191: /*@
192: VecScatterSetUp - Sets up the VecScatter to be able to actually scatter information between vectors
194: Collective on VecScatter
196: Input Parameter:
197: . ctx - the scatter context
199: Level: intermediate
201: .seealso: VecScatterCreate(), VecScatterCopy()
202: @*/
203: PetscErrorCode VecScatterSetUp(VecScatter ctx)
204: {
209: (*ctx->ops->setup)(ctx);
210: return(0);
211: }
213: /*@
214: VecScatterCopy - Makes a copy of a scatter context.
216: Collective on VecScatter
218: Input Parameter:
219: . sctx - the scatter context
221: Output Parameter:
222: . ctx - the context copy
224: Level: advanced
226: .seealso: VecScatterCreate(), VecScatterDestroy()
227: @*/
228: PetscErrorCode VecScatterCopy(VecScatter sctx,VecScatter *ctx)
229: {
231: VecScatterType type;
236: if (!sctx->ops->copy) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot copy this type");
237: PetscHeaderCreate(*ctx,VEC_SCATTER_CLASSID,"VecScatter","VecScatter","Vec",PetscObjectComm((PetscObject)sctx),VecScatterDestroy,VecScatterView);
238: (*ctx)->to_n = sctx->to_n;
239: (*ctx)->from_n = sctx->from_n;
240: (*sctx->ops->copy)(sctx,*ctx);
242: VecScatterGetType(sctx,&type);
243: PetscObjectChangeTypeName((PetscObject)(*ctx),type);
244: return(0);
245: }
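/*
   Editorial usage sketch (not part of the PETSc source): duplicate an existing scatter
   context so the copy can be used and destroyed independently of the original.
*/
static PetscErrorCode ExampleCopyScatter(VecScatter sctx,Vec x,Vec y)
{
  VecScatter dup;

  VecScatterCopy(sctx,&dup);
  VecScatterBegin(dup,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(dup,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterDestroy(&dup);
  return(0);
}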
247: /*@C
248: VecScatterViewFromOptions - View a VecScatter object based on values in the options database
250: Collective on VecScatter
252: Input Parameters:
253: + A - the scatter context
254: . obj - Optional object
255: - name - command line option
257: Level: intermediate
258: .seealso: VecScatter, VecScatterView, PetscObjectViewFromOptions(), VecScatterCreate()
259: @*/
260: PetscErrorCode VecScatterViewFromOptions(VecScatter A,PetscObject obj,const char name[])
261: {
266: PetscObjectViewFromOptions((PetscObject)A,obj,name);
267: return(0);
268: }
270: /* ------------------------------------------------------------------*/
271: /*@C
272: VecScatterView - Views a vector scatter context.
274: Collective on VecScatter
276: Input Parameters:
277: + ctx - the scatter context
278: - viewer - the viewer for displaying the context
280: Level: intermediate
282: @*/
283: PetscErrorCode VecScatterView(VecScatter ctx,PetscViewer viewer)
284: {
289: if (!viewer) {
290: PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)ctx),&viewer);
291: }
293: if (ctx->ops->view) {
294: (*ctx->ops->view)(ctx,viewer);
295: }
296: return(0);
297: }
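/*
   Editorial usage sketch (not part of the PETSc source): a NULL viewer falls back to the
   ASCII stdout viewer on the context's communicator, as handled in the code above.
*/
static PetscErrorCode ExampleViewScatter(VecScatter ctx)
{
  VecScatterView(ctx,NULL);
  return(0);
}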
299: /*@C
300: VecScatterRemap - Remaps the "from" and "to" indices in a
301: vector scatter context. FOR EXPERTS ONLY!
303: Collective on VecScatter
305: Input Parameters:
306: + scat - vector scatter context
307: . tomap - remapping plan for "to" indices (may be NULL)
308: - frommap - remapping plan for "from" indices (may be NULL)
310: Level: developer
312: Notes:
313: In the parallel case the todata contains indices from where the data is taken
314: (and then sent to others)! The fromdata contains indices from where the received
315: data is finally put locally.
317: In the sequential case the todata contains indices from where the data is put
318: and the fromdata contains indices from where the data is taken.
319: This is backwards from the parallel case!
321: @*/
322: PetscErrorCode VecScatterRemap(VecScatter scat,PetscInt tomap[],PetscInt frommap[])
323: {
324: VecScatter_MPI_General *to,*from;
325: VecScatter_Seq_General *sgto,*sgfrom;
326: VecScatter_Seq_Stride *ssto;
327: PetscInt i,ierr;
334: if (scat->ops->remap) {
335: (*scat->ops->remap)(scat,tomap,frommap);
336: } else {
337: to = (VecScatter_MPI_General*)scat->todata;
338: from = (VecScatter_MPI_General*)scat->fromdata;
339: ssto = (VecScatter_Seq_Stride*)scat->todata;
340: sgto = (VecScatter_Seq_General*)scat->todata;
341: sgfrom = (VecScatter_Seq_General*)scat->fromdata;
343: /* remap indices from where we take/read data */
344: if (tomap) {
345: if (to->format == VEC_SCATTER_MPI_TOALL) {
346: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Not for to all scatter");
347: } else if (to->format == VEC_SCATTER_MPI_GENERAL) {
348: /* handle off processor parts */
349: for (i=0; i<to->starts[to->n]; i++) to->indices[i] = tomap[to->indices[i]];
351: /* handle local part */
352: for (i=0; i<to->local.n; i++) to->local.vslots[i] = tomap[to->local.vslots[i]];
354: /* the memcpy optimizations in the vecscatter were based on the index patterns it had.
355: They need to be recalculated when the indices are changed (remapped).
356: */
357: VecScatterMemcpyPlanDestroy_PtoP(to,from);
358: VecScatterMemcpyPlanCreate_PtoP(to,from);
359: } else if (sgfrom->format == VEC_SCATTER_SEQ_GENERAL) {
360: /* remap indices */
361: for (i=0; i<sgfrom->n; i++) sgfrom->vslots[i] = tomap[sgfrom->vslots[i]];
362: /* update optimizations, which happen when it is a Stride1toSG, SGtoStride1 or SGToSG vecscatter */
363: if (ssto->format == VEC_SCATTER_SEQ_STRIDE && ssto->step == 1) {
364: PetscInt tmp[2];
365: tmp[0] = 0; tmp[1] = sgfrom->n;
366: VecScatterMemcpyPlanDestroy(&sgfrom->memcpy_plan);
367: VecScatterMemcpyPlanCreate_Index(1,tmp,sgfrom->vslots,1/*bs*/,&sgfrom->memcpy_plan);
368: } else if (sgto->format == VEC_SCATTER_SEQ_GENERAL) {
369: VecScatterMemcpyPlanDestroy(&sgto->memcpy_plan);
370: VecScatterMemcpyPlanDestroy(&sgfrom->memcpy_plan);
371: VecScatterMemcpyPlanCreate_SGToSG(1/*bs*/,sgto,sgfrom);
372: }
373: } else if (sgfrom->format == VEC_SCATTER_SEQ_STRIDE) {
374: VecScatter_Seq_Stride *ssto = (VecScatter_Seq_Stride*)sgfrom;
376: /* if the remapping is the identity and stride is identity then skip remap */
377: if (ssto->step == 1 && ssto->first == 0) {
378: for (i=0; i<ssto->n; i++) {
379: if (tomap[i] != i) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
380: }
381: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
382: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
383: }
384: }
385: if (frommap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unable to remap the FROM in scatters yet");
387: /*
388: Mark the vector lengths as unknown because we do not know the
389: lengths of the remapped vectors
390: */
391: scat->from_n = -1;
392: scat->to_n = -1;
393: return(0);
394: }
396: /* Given a parallel VecScatter context, return number of procs and vector entries involved in remote (i.e., off-process) communication
398: Input Parameters:
399: + ctx - the context (must be a parallel vecscatter)
400: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
402: Output parameters:
403: + num_procs - number of remote processors
404: - num_entries - number of vector entries to send or recv
407: .seealso: VecScatterGetRemote_Private(), VecScatterGetRemoteOrdered_Private()
409: Notes:
410: Sometimes PETSc internally needs to use the matrix-vector-multiply vecscatter context for other purposes. Such client code
411: usually only uses MPI_Send/Recv directly; this group of subroutines provides the info needed for such uses.
412: */
413: PetscErrorCode VecScatterGetRemoteCount_Private(VecScatter ctx,PetscBool send,PetscInt *num_procs,PetscInt *num_entries)
414: {
415: VecScatter_MPI_General *vs;
416: PetscBool par;
417: PetscErrorCode ierr;
420: if (ctx->ops->getremotecount) {
421: (*ctx->ops->getremotecount)(ctx,send,num_procs,num_entries);
422: } else {
423: vs = (VecScatter_MPI_General*)(send ? ctx->todata : ctx->fromdata);
424: par = (vs->format == VEC_SCATTER_MPI_GENERAL)? PETSC_TRUE : PETSC_FALSE;
425: if (num_procs) *num_procs = par ? vs->n : 0;
426: if (num_entries) *num_entries = par ? vs->starts[vs->n] : 0;
427: }
428: return(0);
429: }
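/*
   Editorial usage sketch (not part of the PETSc source): query how many remote ranks and
   vector entries the send side of a parallel scatter involves, e.g. to size message
   buffers before driving the communication with MPI directly.
*/
static PetscErrorCode ExampleRemoteCount(VecScatter ctx)
{
  PetscInt nprocs,nentries;

  VecScatterGetRemoteCount_Private(ctx,PETSC_TRUE/*send*/,&nprocs,&nentries);
  /* nentries vector entries are sent to nprocs remote ranks in total */
  return(0);
}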
431: /* Given a parallel VecScatter context, return a plan that represents the remote communication.
432: Any output parameter can be NULL.
434: Input Parameters:
435: + ctx - the context
436: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
438: Output parameters:
439: + n - number of remote processors
440: . starts - starting point in indices for each proc. ATTENTION: starts[0] is not necessarily zero.
441: Therefore, expressions like starts[i+1]-starts[i] and indices[starts[i]+j] work as
442: expected for a CSR structure but buf[starts[i]+j] may be out of range if buf was allocated
443: with length starts[n]-starts[0]. One should use buf[starts[i]-starts[0]+j] instead.
444: . indices - indices of entries to send/recv
445: . procs - ranks of remote processors
446: - bs - block size
448: .seealso: VecScatterRestoreRemote_Private(), VecScatterGetRemoteOrdered_Private()
449: */
450: PetscErrorCode VecScatterGetRemote_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
451: {
452: VecScatter_MPI_General *vs;
453: PetscBool par;
454: PetscErrorCode ierr;
457: if (ctx->ops->getremote) {
458: (*ctx->ops->getremote)(ctx,send,n,starts,indices,procs,bs);
459: } else {
460: vs = (VecScatter_MPI_General*)(send ? ctx->todata : ctx->fromdata);
461: par = (vs->format == VEC_SCATTER_MPI_GENERAL)? PETSC_TRUE : PETSC_FALSE;
462: if (n) *n = par ? vs->n : 0;
463: if (indices) *indices = par ? vs->indices : NULL;
464: if (starts) *starts = par ? vs->starts : NULL;
465: if (procs) *procs = par ? vs->procs : NULL;
466: if (bs) *bs = par ? vs->bs : 0;
467: }
468: return(0);
469: }
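/*
   Editorial usage sketch (not part of the PETSc source): pack a send buffer from the
   returned plan, respecting the note above that starts[0] is not necessarily zero.
   Assumes block size 1; xarray is a hypothetical pointer to the local entries of the
   vector being sent from, and the destination ranks would come from the procs argument.
*/
static PetscErrorCode ExamplePackSendBuffer(VecScatter ctx,const PetscScalar *xarray)
{
  PetscInt       nsends,i,j;
  const PetscInt *starts,*indices;
  PetscScalar    *buf;

  VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&starts,&indices,NULL,NULL);
  PetscMalloc1(nsends ? starts[nsends]-starts[0] : 0,&buf);
  for (i=0; i<nsends; i++) {
    for (j=starts[i]; j<starts[i+1]; j++) buf[j-starts[0]] = xarray[indices[j]];
  }
  /* ... hand the buffer segments off to MPI sends here ... */
  PetscFree(buf);
  VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&starts,&indices,NULL,NULL);
  return(0);
}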
472: /* Given a parallel VecScatter context, return a plan that represents the remote communication. Ranks of remote
473: processors returned in procs must be sorted in ascending order. Any output parameter can be NULL.
475: Input Parameters:
476: + ctx - the context
477: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
479: Output parameters:
480: + n - number of remote processors
481: . starts - starting point in indices for each proc. ATTENTION: starts[0] is not necessarily zero.
482: Therefore, expressions like starts[i+1]-starts[i] and indices[starts[i]+j] work as
483: expected for a CSR structure but buf[starts[i]+j] may be out of range if buf was allocated
484: with length starts[n]-starts[0]. One should use buf[starts[i]-starts[0]+j] instead.
485: . indices - indices of entries to send/recv
486: . procs - ranks of remote processors
487: - bs - block size
489: .seealso: VecScatterRestoreRemoteOrdered_Private(), VecScatterGetRemote_Private()
491: Notes:
492: Output parameters such as starts and indices are adapted accordingly so they remain consistent with the sorted ranks.
493: */
494: PetscErrorCode VecScatterGetRemoteOrdered_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
495: {
496: VecScatter_MPI_General *vs;
497: PetscBool par;
498: PetscErrorCode ierr;
501: if (ctx->ops->getremoteordered) {
502: (*ctx->ops->getremoteordered)(ctx,send,n,starts,indices,procs,bs);
503: } else {
504: vs = (VecScatter_MPI_General*)(send ? ctx->todata : ctx->fromdata);
505: par = (vs->format == VEC_SCATTER_MPI_GENERAL)? PETSC_TRUE : PETSC_FALSE;
506: if (n) *n = par ? vs->n : 0;
507: if (indices) *indices = par ? vs->indices : NULL;
508: if (starts) *starts = par ? vs->starts : NULL;
509: if (procs) *procs = par ? vs->procs : NULL;
510: if (bs) *bs = par ? vs->bs : 0;
511: }
512: #if defined(PETSC_USE_DEBUG)
513: if (n && procs) {
514: PetscInt i;
515: /* from back to front to also handle the case *n==0 */
516: for (i=*n-1; i>0; i--) { if ((*procs)[i-1] > (*procs)[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"procs[] are not ordered"); }
517: }
518: #endif
519: return(0);
520: }
522: /* Given a parallel VecScatter context, restore the plan returned by VecScatterGetRemote_Private. This gives a chance for
523: an implementation to free memory allocated in the VecScatterGetRemote_Private call.
525: Input Parameters:
526: + ctx - the context
527: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
529: Output parameters:
530: + n - number of remote processors
531: . starts - starting point in indices for each proc
532: . indices - indices of entries to send/recv
533: . procs - ranks of remote processors
534: - bs - block size
536: .seealso: VecScatterGetRemote_Private()
537: */
538: PetscErrorCode VecScatterRestoreRemote_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
539: {
540: PetscErrorCode ierr;
543: if (ctx->ops->restoreremote) {
544: (*ctx->ops->restoreremote)(ctx,send,n,starts,indices,procs,bs);
545: } else {
546: if (starts) *starts = NULL;
547: if (indices) *indices = NULL;
548: if (procs) *procs = NULL;
549: }
550: return(0);
551: }
553: /* Given a parallel VecScatter context, restore the plan returned by VecScatterGetRemoteOrdered_Private. This gives a chance for
554: an implementation to free memory allocated in the VecScatterGetRemoteOrdered_Private call.
556: Input Parameters:
557: + ctx - the context
558: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
560: Output parameters:
561: + n - number of remote processors
562: . starts - starting point in indices for each proc
563: . indices - indices of entries to send/recv
564: . procs - ranks of remote processors
565: - bs - block size
567: .seealso: VecScatterGetRemoteOrdered_Private()
568: */
569: PetscErrorCode VecScatterRestoreRemoteOrdered_Private(VecScatter ctx,PetscBool send,PetscInt *n,const PetscInt **starts,const PetscInt **indices,const PetscMPIInt **procs,PetscInt *bs)
570: {
573: if (ctx->ops->restoreremoteordered) {
574: (*ctx->ops->restoreremoteordered)(ctx,send,n,starts,indices,procs,bs);
575: } else {
576: VecScatterRestoreRemote_Private(ctx,send,n,starts,indices,procs,bs);
577: }
578: return(0);
579: }
581: #if defined(PETSC_HAVE_CUDA)
583: /*@C
584: VecScatterInitializeForGPU - Initializes a generalized scatter from one vector
585: to another for GPU based computation.
587: Input Parameters:
588: + inctx - scatter context generated by VecScatterCreate()
589: - x - the vector from which we scatter
591: Level: intermediate
593: Notes:
594: Effectively, this function creates all the necessary indexing buffers and work
595: vectors needed to move only those data points in a vector which need to
596: be communicated across ranks. This is done the first time this function is
597: called. Currently, this is only used in the context of the parallel SpMV call in
598: MatMult_MPIAIJCUSPARSE.
600: This function is executed before the call to MatMult. This enables the memory
601: transfers to be overlapped with the MatMult SpMV kernel call.
603: .seealso: VecScatterFinalizeForGPU(), VecScatterCreate(), VecScatterEnd()
604: @*/
605: PETSC_EXTERN PetscErrorCode VecScatterInitializeForGPU(VecScatter inctx,Vec x)
606: {
608: PetscInt i,nrecvs,nsends,sbs,rbs,ns,nr;
609: const PetscInt *sstarts,*rstarts,*sindices,*rindices;
610: VecScatterType type;
611: PetscBool isSF;
614: VecScatterGetType(inctx,&type);
615: PetscStrcmp(type,VECSCATTERSF,&isSF);
616: if (isSF) return(0);
618: VecScatterGetRemote_Private(inctx,PETSC_TRUE/*send*/, &nsends,&sstarts,&sindices,NULL/*procs*/,&sbs);
619: VecScatterGetRemote_Private(inctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,&rindices,NULL/*procs*/,&rbs);
620: ns = nsends ? sstarts[nsends]-sstarts[0] : 0; /* s/rstarts[0] is not necessarily zero */
621: nr = nrecvs ? rstarts[nrecvs]-rstarts[0] : 0;
623: if (x->offloadmask != PETSC_OFFLOAD_UNALLOCATED && (nsends>0 || nrecvs>0)) {
624: if (!inctx->spptr) {
625: PetscInt k,*tindicesSends,*sindicesSends,*tindicesRecvs,*sindicesRecvs;
626: /* Here we create indices for both the senders and receivers. */
627: PetscMalloc1(ns,&tindicesSends);
628: PetscMalloc1(nr,&tindicesRecvs);
630: /* s/rindices and s/rstarts could be NULL when ns or nr is zero */
631: if (ns) {PetscArraycpy(tindicesSends,&sindices[sstarts[0]],ns);}
632: if (nr) {PetscArraycpy(tindicesRecvs,&rindices[rstarts[0]],nr);}
634: PetscSortRemoveDupsInt(&ns,tindicesSends);
635: PetscSortRemoveDupsInt(&nr,tindicesRecvs);
637: PetscMalloc1(sbs*ns,&sindicesSends);
638: PetscMalloc1(rbs*nr,&sindicesRecvs);
640: /* sender indices */
641: for (i=0; i<ns; i++) {
642: for (k=0; k<sbs; k++) sindicesSends[i*sbs+k] = tindicesSends[i]+k;
643: }
644: PetscFree(tindicesSends);
646: /* receiver indices */
647: for (i=0; i<nr; i++) {
648: for (k=0; k<rbs; k++) sindicesRecvs[i*rbs+k] = tindicesRecvs[i]+k;
649: }
650: PetscFree(tindicesRecvs);
652: /* create GPU indices, work vectors, ... */
653: VecScatterCUDAIndicesCreate_PtoP(ns*sbs,sindicesSends,nr*rbs,sindicesRecvs,(PetscCUDAIndices*)&inctx->spptr);
654: PetscFree(sindicesSends);
655: PetscFree(sindicesRecvs);
656: }
657: }
658: return(0);
659: }
661: /*@C
662: VecScatterFinalizeForGPU - Finalizes a generalized scatter from one vector to
663: another for GPU based computation.
665: Input Parameter:
666: . inctx - scatter context generated by VecScatterCreate()
668: Level: intermediate
670: Notes:
671: Effectively, this function resets the temporary buffer flags. Currently, this is
672: only used in the context of the parallel SpMV call in MatMult_MPIAIJCUSPARSE.
673: Once the MatMultAdd is finished, the GPU temporary buffers used for messaging are no longer valid.
675: .seealso: VecScatterInitializeForGPU(), VecScatterCreate(), VecScatterEnd()
676: @*/
677: PETSC_EXTERN PetscErrorCode VecScatterFinalizeForGPU(VecScatter inctx)
678: {
680: return(0);
681: }
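/*
   Editorial usage sketch (not part of the PETSc source): the initialize/finalize pair is
   intended to bracket a scatter driven alongside a GPU SpMV, as the notes above describe
   for MatMult_MPIAIJCUSPARSE.  The vectors and scatter context here are hypothetical.
*/
static PetscErrorCode ExampleGPUScatter(VecScatter ctx,Vec x,Vec y)
{
  VecScatterInitializeForGPU(ctx,x);
  VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
  /* launch the local part of the SpMV here so it overlaps with the messaging */
  VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterFinalizeForGPU(ctx);
  return(0);
}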
683: #endif