Actual source code: vscat.c
1: #include <petsc/private/sfimpl.h>
2: #include <../src/vec/is/sf/impls/basic/sfbasic.h>
3: #include <../src/vec/is/sf/impls/basic/sfpack.h>
4: #include <petsc/private/vecimpl.h>
6: typedef enum {
7: IS_INVALID,
8: IS_GENERAL,
9: IS_BLOCK,
10: IS_STRIDE
11: } ISTypeID;
13: static inline PetscErrorCode ISGetTypeID_Private(IS is, ISTypeID *id)
14: {
15: PetscBool same;
17: PetscFunctionBegin;
18: *id = IS_INVALID;
19: PetscCall(PetscObjectTypeCompare((PetscObject)is, ISGENERAL, &same));
20: if (same) {
21: *id = IS_GENERAL;
22: goto functionend;
23: }
24: PetscCall(PetscObjectTypeCompare((PetscObject)is, ISBLOCK, &same));
25: if (same) {
26: *id = IS_BLOCK;
27: goto functionend;
28: }
29: PetscCall(PetscObjectTypeCompare((PetscObject)is, ISSTRIDE, &same));
30: if (same) {
31: *id = IS_STRIDE;
32: goto functionend;
33: }
34: functionend:
35: PetscFunctionReturn(PETSC_SUCCESS);
36: }
38: static PetscErrorCode VecScatterBegin_Internal(VecScatter sf, Vec x, Vec y, InsertMode addv, ScatterMode mode)
39: {
40: PetscSF wsf = NULL; /* either sf or its local part */
41: MPI_Op mop = MPI_OP_NULL;
42: PetscMPIInt size;
43: PetscMemType xmtype = PETSC_MEMTYPE_HOST, ymtype = PETSC_MEMTYPE_HOST;
45: PetscFunctionBegin;
46: if (x != y) PetscCall(VecLockReadPush(x));
47: PetscCall(VecGetArrayReadAndMemType(x, &sf->vscat.xdata, &xmtype));
48: PetscCall(VecGetArrayAndMemType(y, &sf->vscat.ydata, &ymtype));
49: PetscCall(VecLockWriteSet(y, PETSC_TRUE));
51: /* SCATTER_FORWARD_LOCAL indicates ignoring inter-process communication */
52: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
53: if ((mode & SCATTER_FORWARD_LOCAL) && size > 1) { /* Lazy creation of sf->vscat.lsf since SCATTER_FORWARD_LOCAL is uncommon */
54: if (!sf->vscat.lsf) PetscCall(PetscSFCreateLocalSF_Private(sf, &sf->vscat.lsf));
55: wsf = sf->vscat.lsf;
56: } else {
57: wsf = sf;
58: }
60: /* Note xdata/ydata is always recorded on sf (not lsf) above */
61: if (addv == INSERT_VALUES) mop = MPI_REPLACE;
62: else if (addv == ADD_VALUES) mop = MPIU_SUM; /* Petsc defines its own MPI datatype and SUM operation for __float128 etc. */
63: else if (addv == MAX_VALUES) mop = MPIU_MAX;
64: else if (addv == MIN_VALUES) mop = MPIU_MIN;
65: else SETERRQ(PetscObjectComm((PetscObject)sf), PETSC_ERR_SUP, "Unsupported InsertMode %d in VecScatterBegin/End", addv);
67: if (mode & SCATTER_REVERSE) { /* REVERSE indicates leaves to root scatter. Note that x and y are swapped in input */
68: PetscCall(PetscSFReduceWithMemTypeBegin(wsf, sf->vscat.unit, xmtype, sf->vscat.xdata, ymtype, sf->vscat.ydata, mop));
69: } else { /* FORWARD indicates x to y scatter, where x is root and y is leaf */
70: PetscCall(PetscSFBcastWithMemTypeBegin(wsf, sf->vscat.unit, xmtype, sf->vscat.xdata, ymtype, sf->vscat.ydata, mop));
71: }
72: PetscFunctionReturn(PETSC_SUCCESS);
73: }
75: static PetscErrorCode VecScatterEnd_Internal(VecScatter sf, Vec x, Vec y, InsertMode addv, ScatterMode mode)
76: {
77: PetscSF wsf = NULL;
78: MPI_Op mop = MPI_OP_NULL;
79: PetscMPIInt size;
81: PetscFunctionBegin;
82: /* SCATTER_FORWARD_LOCAL indicates ignoring inter-process communication */
83: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
84: wsf = ((mode & SCATTER_FORWARD_LOCAL) && size > 1) ? sf->vscat.lsf : sf;
86: if (addv == INSERT_VALUES) mop = MPI_REPLACE;
87: else if (addv == ADD_VALUES) mop = MPIU_SUM;
88: else if (addv == MAX_VALUES) mop = MPIU_MAX;
89: else if (addv == MIN_VALUES) mop = MPIU_MIN;
90: else SETERRQ(PetscObjectComm((PetscObject)sf), PETSC_ERR_SUP, "Unsupported InsertMode %d in VecScatterBegin/End", addv);
92: if (mode & SCATTER_REVERSE) { /* reverse scatter sends leaves to roots. Note that x and y are swapped in input */
93: PetscCall(PetscSFReduceEnd(wsf, sf->vscat.unit, sf->vscat.xdata, sf->vscat.ydata, mop));
94: } else { /* forward scatter sends roots to leaves, i.e., x to y */
95: PetscCall(PetscSFBcastEnd(wsf, sf->vscat.unit, sf->vscat.xdata, sf->vscat.ydata, mop));
96: }
98: PetscCall(VecRestoreArrayReadAndMemType(x, &sf->vscat.xdata));
99: if (x != y) PetscCall(VecLockReadPop(x));
100: PetscCall(VecRestoreArrayAndMemType(y, &sf->vscat.ydata));
101: PetscCall(VecLockWriteSet(y, PETSC_FALSE));
102: PetscFunctionReturn(PETSC_SUCCESS);
103: }
105: /* VecScatterRemap provides a lightweight way to slightly modify a VecScatter. Suppose the input sf scatters
106:    x[i] to y[j]; tomap gives a plan to change vscat to scatter x[tomap[i]] to y[j]. Note that in SF,
107:    x is the roots. That means we need to change incoming data such as bas->irootloc[].
108: */
109: static PetscErrorCode VecScatterRemap_Internal(VecScatter sf, const PetscInt *tomap, const PetscInt *frommap)
110: {
111: PetscInt i, bs = sf->vscat.bs;
112: PetscMPIInt size;
113: PetscBool ident = PETSC_TRUE, isbasic, isneighbor;
114: PetscSFType type;
115: PetscSF_Basic *bas = NULL;
117: PetscFunctionBegin;
118: /* check if it is an identity map. If it is, do nothing */
119: if (tomap) {
120: for (i = 0; i < sf->nroots * bs; i++) {
121: if (i != tomap[i]) {
122: ident = PETSC_FALSE;
123: break;
124: }
125: }
126: if (ident) PetscFunctionReturn(PETSC_SUCCESS);
127: }
128: PetscCheck(!frommap, PETSC_COMM_SELF, PETSC_ERR_SUP, "Unable to remap the FROM in scatters yet");
129: if (!tomap) PetscFunctionReturn(PETSC_SUCCESS);
131: PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
133: /* Since the indices changed, we must also update the local SF. But we do not do it since
134: lsf is rarely used. We just destroy lsf and rebuild it on demand from updated sf.
135: */
136: if (sf->vscat.lsf) PetscCall(PetscSFDestroy(&sf->vscat.lsf));
138: PetscCall(PetscSFGetType(sf, &type));
139: PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFBASIC, &isbasic));
140: PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFNEIGHBOR, &isneighbor));
141: PetscCheck(isbasic || isneighbor, PetscObjectComm((PetscObject)sf), PETSC_ERR_SUP, "VecScatterRemap on SF type %s is not supported", type);
143: PetscCall(PetscSFSetUp(sf)); /* to build sf->irootloc if SetUp is not yet called */
145: /* Root indices are going to be remapped. This is tricky for SF. Root indices are used in sf->rremote,
146: sf->remote and bas->irootloc. The latter one is cheap to remap, but the former two are not.
147: To remap them, we have to do a bcast from roots to leaves, to let leaves know their updated roots.
148: Since VecScatterRemap is supposed to be a cheap routine to adapt a vecscatter by only changing where
149: x[] data is taken, we do not remap sf->rremote, sf->remote. The consequence is that operations
150: accessing them (such as PetscSFCompose) may get stale info. Considering VecScatter does not need
151: that complicated SF operations, we do not remap sf->rremote, sf->remote, instead we destroy them
152:    so that code accessing them (if any) will crash (instead of getting silent errors). Note that BcastAndOp/Reduce,
153: which are used by VecScatter and only rely on bas->irootloc, are updated and correct.
154: */
155: sf->remote = NULL;
156: PetscCall(PetscFree(sf->remote_alloc));
157: /* Not easy to free sf->rremote since it was allocated with PetscMalloc4(), so just give it crazy values */
158: for (i = 0; i < sf->roffset[sf->nranks]; i++) sf->rremote[i] = PETSC_MIN_INT;
160: /* Indices in tomap[] are for each individual vector entry. But indices in sf are for each
161: block in the vector. So before the remapping, we have to expand indices in sf by bs, and
162: after the remapping, we have to shrink them back.
163: */
164: bas = (PetscSF_Basic *)sf->data;
165: for (i = 0; i < bas->ioffset[bas->niranks]; i++) bas->irootloc[i] = tomap[bas->irootloc[i] * bs] / bs;
166: #if defined(PETSC_HAVE_DEVICE)
167: /* Free the irootloc copy on device. We allocate a new copy and get the updated value on demand. See PetscSFLinkGetRootPackOptAndIndices() */
168: for (i = 0; i < 2; i++) PetscCall(PetscSFFree(sf, PETSC_MEMTYPE_DEVICE, bas->irootloc_d[i]));
169: #endif
170: /* Destroy and then rebuild root packing optimizations since indices are changed */
171: PetscCall(PetscSFResetPackFields(sf));
172: PetscCall(PetscSFSetUpPackFields(sf));
173: PetscFunctionReturn(PETSC_SUCCESS);
174: }
176: /*
177:    Given a parallel VecScatter context, return the number of processes and vector entries involved in remote (i.e., off-process) communication
179: Input Parameters:
180: + sf - the context (must be a parallel vecscatter)
181: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
183: Output parameters:
184: + num_procs - number of remote processors
185: - num_entries - number of vector entries to send or recv
187: Notes:
188:    Sometimes PETSc internally needs to use the matrix-vector-multiply vecscatter context for other purposes. Such client code
189:    usually communicates directly with MPI_Send/Recv. This group of subroutines provides the info needed for those uses.
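   Example Usage:
   A minimal sketch (the buffer name sendbuf is illustrative only, not part of this API) that sizes a send buffer from the counts:
.vb
   PetscInt     nsendranks, nsendentries;
   PetscScalar *sendbuf;

   PetscCall(VecScatterGetRemoteCount_Private(sf, PETSC_TRUE, &nsendranks, &nsendentries));
   PetscCall(PetscMalloc1(nsendentries, &sendbuf)); // one slot per vector entry sent off-process
   // ... pack sendbuf and communicate with MPI_Send/MPI_Recv ...
   PetscCall(PetscFree(sendbuf));
.ve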
191: .seealso: [](sec_scatter), `VecScatterGetRemote_Private()`, `VecScatterGetRemoteOrdered_Private()`
192: */
193: PetscErrorCode VecScatterGetRemoteCount_Private(VecScatter sf, PetscBool send, PetscInt *num_procs, PetscInt *num_entries)
194: {
195: PetscInt nranks, remote_start;
196: PetscMPIInt rank;
197: const PetscInt *offset;
198: const PetscMPIInt *ranks;
200: PetscFunctionBegin;
201: PetscCall(PetscSFSetUp(sf));
202: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
204: /* This routine is mainly used for MatMult's Mvctx. In Mvctx, we scatter an MPI vector x to a sequential vector lvec.
205: Remember x is roots and lvec is leaves. 'send' means roots to leaves communication. If 'send' is true, we need to
206: get info about which ranks this processor needs to send to. In other words, we need to call PetscSFGetLeafRanks().
207: If send is false, we do the opposite, calling PetscSFGetRootRanks().
208: */
209: if (send) PetscCall(PetscSFGetLeafRanks(sf, &nranks, &ranks, &offset, NULL));
210: else PetscCall(PetscSFGetRootRanks(sf, &nranks, &ranks, &offset, NULL, NULL));
211: if (nranks) {
212: remote_start = (rank == ranks[0]) ? 1 : 0;
213: if (num_procs) *num_procs = nranks - remote_start;
214: if (num_entries) *num_entries = offset[nranks] - offset[remote_start];
215: } else {
216: if (num_procs) *num_procs = 0;
217: if (num_entries) *num_entries = 0;
218: }
219: PetscFunctionReturn(PETSC_SUCCESS);
220: }
222: /* Given a parallel VecScatter context, return a plan that represents the remote communication.
223: Any output parameter can be NULL.
225: Input Parameters:
226: + sf - the context
227: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
229: Output parameters:
230: + n - number of remote processors
231: . starts - starting point in indices for each proc. ATTENTION: starts[0] is not necessarily zero.
232: Therefore, expressions like starts[i+1]-starts[i] and indices[starts[i]+j] work as
233: expected for a CSR structure but buf[starts[i]+j] may be out of range if buf was allocated
234: with length starts[n]-starts[0]. One should use buf[starts[i]-starts[0]+j] instead.
235: . indices - indices of entries to send/recv
236: . procs - ranks of remote processors
237: - bs - block size
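   Example Usage:
   A hedged sketch of consuming the returned plan for sends; it only illustrates the starts[] offset
   convention described above (the buffer buf mentioned in the comments is hypothetical):
.vb
   PetscInt           nsend, bs;
   const PetscInt    *starts, *indices;
   const PetscMPIInt *procs;

   PetscCall(VecScatterGetRemote_Private(sf, PETSC_TRUE, &nsend, &starts, &indices, &procs, &bs));
   for (PetscInt i = 0; i < nsend; i++) {
     // entries going to procs[i] are x[indices[starts[i] + j]], j = 0..starts[i+1]-starts[i]-1;
     // in a buffer of length starts[nsend]-starts[0] they belong at buf[starts[i]-starts[0]+j], not buf[starts[i]+j]
   }
   PetscCall(VecScatterRestoreRemote_Private(sf, PETSC_TRUE, &nsend, &starts, &indices, &procs, &bs));
.ve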
239: .seealso: `VecScatterRestoreRemote_Private()`, `VecScatterGetRemoteOrdered_Private()`
240: */
241: PetscErrorCode VecScatterGetRemote_Private(VecScatter sf, PetscBool send, PetscInt *n, const PetscInt **starts, const PetscInt **indices, const PetscMPIInt **procs, PetscInt *bs)
242: {
243: PetscInt nranks, remote_start;
244: PetscMPIInt rank;
245: const PetscInt *offset, *location;
246: const PetscMPIInt *ranks;
248: PetscFunctionBegin;
249: PetscCall(PetscSFSetUp(sf));
250: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
252: if (send) PetscCall(PetscSFGetLeafRanks(sf, &nranks, &ranks, &offset, &location));
253: else PetscCall(PetscSFGetRootRanks(sf, &nranks, &ranks, &offset, &location, NULL));
255: if (nranks) {
256: remote_start = (rank == ranks[0]) ? 1 : 0;
257: if (n) *n = nranks - remote_start;
258: if (starts) *starts = &offset[remote_start];
259: if (indices) *indices = location; /* not &location[offset[remote_start]]. Starts[0] may point to the middle of indices[] */
260: if (procs) *procs = &ranks[remote_start];
261: } else {
262: if (n) *n = 0;
263: if (starts) *starts = NULL;
264: if (indices) *indices = NULL;
265: if (procs) *procs = NULL;
266: }
268: if (bs) *bs = 1;
269: PetscFunctionReturn(PETSC_SUCCESS);
270: }
272: /* Given a parallel VecScatter context, return a plan that represents the remote communication. Ranks of remote
273: processors returned in procs must be sorted in ascending order. Any output parameter can be NULL.
275: Input Parameters:
276: + sf - the context
277: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
279: Output parameters:
280: + n - number of remote processors
281: . starts - starting point in indices for each proc. ATTENTION: starts[0] is not necessarily zero.
282: Therefore, expressions like starts[i+1]-starts[i] and indices[starts[i]+j] work as
283: expected for a CSR structure but buf[starts[i]+j] may be out of range if buf was allocated
284: with length starts[n]-starts[0]. One should use buf[starts[i]-starts[0]+j] instead.
285: . indices - indices of entries to send/recv
286: . procs - ranks of remote processors
287: - bs - block size
289: Notes:
290: Output parameters like starts, indices must also be adapted according to the sorted ranks.
292: .seealso: `VecScatterRestoreRemoteOrdered_Private()`, `VecScatterGetRemote_Private()`
293: */
294: PetscErrorCode VecScatterGetRemoteOrdered_Private(VecScatter sf, PetscBool send, PetscInt *n, const PetscInt **starts, const PetscInt **indices, const PetscMPIInt **procs, PetscInt *bs)
295: {
296: PetscFunctionBegin;
297: PetscCall(VecScatterGetRemote_Private(sf, send, n, starts, indices, procs, bs));
298: if (PetscUnlikelyDebug(n && procs)) {
299: PetscInt i;
300:     /* go from back to front so the loop also handles the case *n = 0 */
301: for (i = *n - 1; i > 0; i--) PetscCheck((*procs)[i - 1] <= (*procs)[i], PETSC_COMM_SELF, PETSC_ERR_PLIB, "procs[] are not ordered");
302: }
303: PetscFunctionReturn(PETSC_SUCCESS);
304: }
306: /* Given a parallel VecScatter context, restore the plan returned by VecScatterGetRemote_Private. This gives a chance for
307: an implementation to free memory allocated in the VecScatterGetRemote_Private call.
309: Input Parameters:
310: + sf - the context
311: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
313: Output parameters:
314: + n - number of remote processors
315: . starts - starting point in indices for each proc
316: . indices - indices of entries to send/recv
317: . procs - ranks of remote processors
318: - bs - block size
320: .seealso: `VecScatterGetRemote_Private()`
321: */
322: PetscErrorCode VecScatterRestoreRemote_Private(VecScatter sf, PetscBool send, PetscInt *n, const PetscInt **starts, const PetscInt **indices, const PetscMPIInt **procs, PetscInt *bs)
323: {
324: PetscFunctionBegin;
325: if (starts) *starts = NULL;
326: if (indices) *indices = NULL;
327: if (procs) *procs = NULL;
328: PetscFunctionReturn(PETSC_SUCCESS);
329: }
331: /* Given a parallel VecScatter context, restore the plan returned by VecScatterGetRemoteOrdered_Private. This gives a chance for
332: an implementation to free memory allocated in the VecScatterGetRemoteOrdered_Private call.
334: Input Parameters:
335: + sf - the context
336: - send - true to select the send info (i.e., todata), otherwise to select the recv info (i.e., fromdata)
338: Output parameters:
339: + n - number of remote processors
340: . starts - starting point in indices for each proc
341: . indices - indices of entries to send/recv
342: . procs - ranks of remote processors
343: - bs - block size
345: .seealso: `VecScatterGetRemoteOrdered_Private()`
346: */
347: PetscErrorCode VecScatterRestoreRemoteOrdered_Private(VecScatter sf, PetscBool send, PetscInt *n, const PetscInt **starts, const PetscInt **indices, const PetscMPIInt **procs, PetscInt *bs)
348: {
349: PetscFunctionBegin;
350: PetscCall(VecScatterRestoreRemote_Private(sf, send, n, starts, indices, procs, bs));
351: PetscFunctionReturn(PETSC_SUCCESS);
352: }
354: /*@
355: VecScatterSetUp - Sets up the `VecScatter` to be able to actually scatter information between vectors
357: Collective
359: Input Parameter:
360: . sf - the scatter context
362: Level: intermediate
364: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterCopy()`
365: @*/
366: PetscErrorCode VecScatterSetUp(VecScatter sf)
367: {
368: PetscFunctionBegin;
369: PetscCall(PetscSFSetUp(sf));
370: PetscFunctionReturn(PETSC_SUCCESS);
371: }
373: /*@C
374: VecScatterSetType - Builds a vector scatter, for a particular vector scatter implementation.
376: Collective
378: Input Parameters:
379: + sf - The `VecScatter` object
380: - type - The name of the vector scatter type
382: Options Database Key:
383: . -sf_type <type> - Sets the `VecScatterType`
385: Level: intermediate
387: Note:
388:   Use `VecScatterDuplicate()` to form additional vector scatters of the same type as an existing vector scatter.
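   Example Usage:
   A minimal sketch; because a `VecScatter` is backed by a `PetscSF`, any registered `PetscSFType`
   (for example `PETSCSFBASIC`) can be supplied here:
.vb
   PetscCall(VecScatterSetType(sf, PETSCSFBASIC));
   PetscCall(VecScatterSetUp(sf));
.ve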
390: .seealso: [](sec_scatter), `VecScatter`, `VecScatterType`, `VecScatterGetType()`, `VecScatterCreate()`
391: @*/
392: PetscErrorCode VecScatterSetType(VecScatter sf, VecScatterType type)
393: {
394: PetscFunctionBegin;
395: PetscCall(PetscSFSetType(sf, type));
396: PetscFunctionReturn(PETSC_SUCCESS);
397: }
399: /*@C
400: VecScatterGetType - Gets the vector scatter type name (as a string) from the `VecScatter`.
402: Not Collective
404: Input Parameter:
405: . sf - The vector scatter
407: Output Parameter:
408: . type - The vector scatter type name
410: Level: intermediate
412: .seealso: [](sec_scatter), `VecScatter`, `VecScatterType`, `VecScatterSetType()`, `VecScatterCreate()`
413: @*/
414: PetscErrorCode VecScatterGetType(VecScatter sf, VecScatterType *type)
415: {
416: PetscFunctionBegin;
417: PetscCall(PetscSFGetType(sf, type));
418: PetscFunctionReturn(PETSC_SUCCESS);
419: }
421: /*@C
422: VecScatterRegister - Adds a new vector scatter component implementation
424: Not Collective
426: Input Parameters:
427: + sname - The name of a new user-defined creation routine
428: - function - The creation routine
430: Level: advanced
432: .seealso: [](sec_scatter), `VecScatter`, `VecScatterType`, `VecRegister()`
433: @*/
434: PetscErrorCode VecScatterRegister(const char sname[], PetscErrorCode (*function)(VecScatter))
435: {
436: PetscFunctionBegin;
437: PetscCall(PetscSFRegister(sname, function));
438: PetscFunctionReturn(PETSC_SUCCESS);
439: }
441: /* ------------------------------------------------------------------*/
442: /*@
443:   VecScatterGetMerged - Returns true if the scatter is completed in the `VecScatterBegin()` call
444:   and `VecScatterEnd()` does nothing
446: Not Collective
448: Input Parameter:
449: . sf - scatter context created with `VecScatterCreate()`
451: Output Parameter:
452: . flg - `PETSC_TRUE` if all of the work of `VecScatterBegin()`/`VecScatterEnd()` is done during `VecScatterBegin()`
454: Level: developer
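   Example Usage:
   A sketch (the vectors x and y are assumed to match the scatter) that overlaps independent local
   work with communication only when begin and end are not merged:
.vb
   PetscBool merged;

   PetscCall(VecScatterGetMerged(sf, &merged));
   PetscCall(VecScatterBegin(sf, x, y, INSERT_VALUES, SCATTER_FORWARD));
   if (!merged) {
     // communication is still in flight; do work that does not touch x or y here
   }
   PetscCall(VecScatterEnd(sf, x, y, INSERT_VALUES, SCATTER_FORWARD));
.ve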
456: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterEnd()`, `VecScatterBegin()`
457: @*/
458: PetscErrorCode VecScatterGetMerged(VecScatter sf, PetscBool *flg)
459: {
460: PetscFunctionBegin;
462: if (flg) *flg = sf->vscat.beginandendtogether;
463: PetscFunctionReturn(PETSC_SUCCESS);
464: }
465: /*@C
466: VecScatterDestroy - Destroys a scatter context created by `VecScatterCreate()`
468: Collective
470: Input Parameter:
471: . sf - the scatter context
473: Level: intermediate
475: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterCopy()`
476: @*/
477: PetscErrorCode VecScatterDestroy(VecScatter *sf)
478: {
479: PetscFunctionBegin;
480: PetscCall(PetscSFDestroy(sf));
481: PetscFunctionReturn(PETSC_SUCCESS);
482: }
484: /*@
485: VecScatterCopy - Makes a copy of a scatter context.
487: Collective
489: Input Parameter:
490: . sf - the scatter context
492: Output Parameter:
493: . newsf - the context copy
495: Level: advanced
497: .seealso: [](sec_scatter), `VecScatter`, `VecScatterType`, `VecScatterCreate()`, `VecScatterDestroy()`
498: @*/
499: PetscErrorCode VecScatterCopy(VecScatter sf, VecScatter *newsf)
500: {
501: PetscFunctionBegin;
502: PetscAssertPointer(newsf, 2);
503: PetscCall(PetscSFDuplicate(sf, PETSCSF_DUPLICATE_GRAPH, newsf));
504: PetscCall(PetscSFSetUp(*newsf));
505: PetscFunctionReturn(PETSC_SUCCESS);
506: }
508: /*@C
509: VecScatterViewFromOptions - View a `VecScatter` object based on values in the options database
511: Collective
513: Input Parameters:
514: + sf - the scatter context
515: . obj - Optional object
516: - name - command line option
518: Level: intermediate
520: Note:
521: See `PetscObjectViewFromOptions()` for available `PetscViewer` and `PetscViewerFormat` values
523: .seealso: [](sec_scatter), `VecScatter`, `VecScatterView()`, `PetscObjectViewFromOptions()`, `VecScatterCreate()`
524: @*/
525: PetscErrorCode VecScatterViewFromOptions(VecScatter sf, PetscObject obj, const char name[])
526: {
527: PetscFunctionBegin;
529: PetscCall(PetscObjectViewFromOptions((PetscObject)sf, obj, name));
530: PetscFunctionReturn(PETSC_SUCCESS);
531: }
533: /* ------------------------------------------------------------------*/
534: /*@C
535: VecScatterView - Views a vector scatter context.
537: Collective
539: Input Parameters:
540: + sf - the scatter context
541: - viewer - the viewer for displaying the context
543: Level: intermediate
545: .seealso: [](sec_scatter), `VecScatter`, `PetscViewer`, `VecScatterViewFromOptions()`, `PetscObjectViewFromOptions()`, `VecScatterCreate()`
546: @*/
547: PetscErrorCode VecScatterView(VecScatter sf, PetscViewer viewer)
548: {
549: PetscFunctionBegin;
550: PetscCall(PetscSFView(sf, viewer));
551: PetscFunctionReturn(PETSC_SUCCESS);
552: }
554: /*@C
555: VecScatterRemap - Remaps the "from" and "to" indices in a
556: vector scatter context.
558: Collective
560: Input Parameters:
561: + sf - vector scatter context
562: . tomap - remapping plan for "to" indices (may be `NULL`).
563: - frommap - remapping plan for "from" indices (may be `NULL`)
565: Level: developer
567: Notes:
568: In the parallel case the todata contains indices from where the data is taken
569: (and then sent to others)! The fromdata contains indices from where the received
570: data is finally put locally.
572: In the sequential case the todata contains indices from where the data is put
573:   and the fromdata contains indices from where the data is taken.
574: This is backwards from the parallel case!
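   Example Usage:
   A hedged sketch for the parallel case that swaps where the first two local entries of `x` are
   taken from; the identity map elsewhere leaves the rest of the scatter unchanged:
.vb
   PetscInt nx, *tomap;

   PetscCall(VecGetLocalSize(x, &nx));
   PetscCall(PetscMalloc1(nx, &tomap));
   for (PetscInt i = 0; i < nx; i++) tomap[i] = i;
   if (nx > 1) {
     tomap[0] = 1; // data formerly taken from x[0] now comes from x[1]
     tomap[1] = 0; // and vice versa
   }
   PetscCall(VecScatterRemap(sf, tomap, NULL));
   PetscCall(PetscFree(tomap));
.ve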
576: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`
577: @*/
578: PetscErrorCode VecScatterRemap(VecScatter sf, PetscInt tomap[], PetscInt frommap[])
579: {
580: PetscFunctionBegin;
581: if (tomap) PetscAssertPointer(tomap, 2);
582: if (frommap) PetscAssertPointer(frommap, 3);
583: PetscCall(VecScatterRemap_Internal(sf, tomap, frommap));
584: PetscCheck(!frommap, PETSC_COMM_SELF, PETSC_ERR_SUP, "Unable to remap the FROM in scatters yet");
585:   /* Mark the vector lengths as unknown because we do not know the lengths of the remapped vectors */
586: sf->vscat.from_n = -1;
587: sf->vscat.to_n = -1;
588: PetscFunctionReturn(PETSC_SUCCESS);
589: }
591: /*@
592: VecScatterSetFromOptions - Configures the vector scatter from values in the options database.
594: Collective
596: Input Parameter:
597: . sf - The vector scatter
599: Notes:
600: To see all options, run your program with the -help option, or consult the users manual.
602: Must be called before `VecScatterSetUp()` and before the vector scatter is used.
604: Level: beginner
606: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterDestroy()`, `VecScatterSetUp()`
607: @*/
608: PetscErrorCode VecScatterSetFromOptions(VecScatter sf)
609: {
610: PetscFunctionBegin;
612: PetscObjectOptionsBegin((PetscObject)sf);
614: sf->vscat.beginandendtogether = PETSC_FALSE;
615: PetscCall(PetscOptionsBool("-vecscatter_merge", "Use combined (merged) vector scatter begin and end", "VecScatterCreate", sf->vscat.beginandendtogether, &sf->vscat.beginandendtogether, NULL));
616: if (sf->vscat.beginandendtogether) PetscCall(PetscInfo(sf, "Using combined (merged) vector scatter begin and end\n"));
617: PetscOptionsEnd();
618: PetscFunctionReturn(PETSC_SUCCESS);
619: }
621: /*@
622: VecScatterCreate - Creates a vector scatter context.
624: Collective
626: Input Parameters:
627: + x - a vector that defines the shape (parallel data layout of the vector) of vectors from
628: which we scatter
629: . y - a vector that defines the shape (parallel data layout of the vector) of vectors to which
630: we scatter
631: . ix - the indices of `x` to scatter (if `NULL`, scatters all values)
632: - iy - the indices of `y` to hold results (if `NULL`, fills the entire vector `y` in order)
634: Output Parameter:
635: . newsf - location to store the new scatter context
637: Options Database Keys:
638: + -vecscatter_view - Prints details of the communication
639: . -vecscatter_view ::ascii_info - Prints fewer details about the communication
640: - -vecscatter_merge - `VecScatterBegin()` handles all of the communication and `VecScatterEnd()` is a no-op;
641:                       this eliminates the chance to overlap computation and communication
643: Level: intermediate
645:   Notes:
646:   If both `x` and `y` are parallel, their communicators must be on the same
647:   set of processes, but their process order can be different.
648:   In calls to `VecScatterBegin()` and `VecScatterEnd()` you can use different vectors than the `x` and
649:   `y` you used above; BUT they must have the same parallel data layout, for example,
650:   they could be obtained from `VecDuplicate()`.
651:   A `VecScatter` context CANNOT be used in two or more simultaneous scatters;
652:   that is, you cannot call a second `VecScatterBegin()` with the same scatter
653:   context until `VecScatterEnd()` has been called on the first `VecScatterBegin()`.
654:   In this case a separate `VecScatter` is needed for each concurrent scatter.
656:   `ix` and `iy` cannot both be `NULL` at the same time.
658: Use `VecScatterCreateToAll()` to create a vecscatter that copies an MPI vector to sequential vectors on all MPI ranks.
659: Use `VecScatterCreateToZero()` to create a vecscatter that copies an MPI vector to a sequential vector on MPI rank 0.
660: These special vecscatters have better performance than general ones.
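   Example Usage:
   A minimal sketch (assuming the MPI vector `x` has at least two entries) that gathers the first two
   global entries of `x` into a length-2 sequential vector on every rank:
.vb
   IS         ix;
   Vec        y;
   VecScatter sf;

   PetscCall(VecCreateSeq(PETSC_COMM_SELF, 2, &y));
   PetscCall(ISCreateStride(PETSC_COMM_SELF, 2, 0, 1, &ix)); // global indices 0 and 1 of x
   PetscCall(VecScatterCreate(x, ix, y, NULL, &sf));         // NULL iy: fill all of y in order
   PetscCall(VecScatterBegin(sf, x, y, INSERT_VALUES, SCATTER_FORWARD));
   PetscCall(VecScatterEnd(sf, x, y, INSERT_VALUES, SCATTER_FORWARD));
   PetscCall(VecScatterDestroy(&sf));
   PetscCall(ISDestroy(&ix));
   PetscCall(VecDestroy(&y));
.ve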
662: .seealso: [](sec_scatter), `VecScatter`, `VecScatterDestroy()`, `VecScatterCreateToAll()`, `VecScatterCreateToZero()`, `PetscSFCreate()`
663: @*/
664: PetscErrorCode VecScatterCreate(Vec x, IS ix, Vec y, IS iy, VecScatter *newsf)
665: {
666: MPI_Comm xcomm, ycomm, bigcomm;
667: Vec xx, yy;
668: IS ix_old = ix, iy_old = iy, ixx, iyy;
669: PetscMPIInt xcommsize, ycommsize, rank, result;
670: PetscInt i, n, N, nroots, nleaves, *ilocal, xstart, ystart, ixsize, iysize, xlen, ylen;
671: const PetscInt *xindices, *yindices;
672: PetscSFNode *iremote;
673: PetscLayout xlayout, ylayout;
674: ISTypeID ixid, iyid;
675: PetscInt bs, bsx, bsy, min, max, m[2], mg[2], ixfirst, ixstep, iyfirst, iystep;
676: PetscBool can_do_block_opt = PETSC_FALSE;
677: PetscSF sf;
679: PetscFunctionBegin;
680: PetscAssertPointer(newsf, 5);
681: PetscCheck(ix || iy, PetscObjectComm((PetscObject)x), PETSC_ERR_SUP, "Cannot pass default in for both input and output indices");
683: /* Get comm from x and y */
684: PetscCall(PetscObjectGetComm((PetscObject)x, &xcomm));
685: PetscCallMPI(MPI_Comm_size(xcomm, &xcommsize));
686: PetscCall(PetscObjectGetComm((PetscObject)y, &ycomm));
687: PetscCallMPI(MPI_Comm_size(ycomm, &ycommsize));
688: if (xcommsize > 1 && ycommsize > 1) {
689: PetscCallMPI(MPI_Comm_compare(xcomm, ycomm, &result));
690: PetscCheck(result != MPI_UNEQUAL, PETSC_COMM_SELF, PETSC_ERR_ARG_NOTSAMECOMM, "VecScatterCreate: parallel vectors x and y must have identical/congruent/similar communicators");
691: }
692: bs = 1; /* default, no blocking */
694: /*
695: Let P and S stand for parallel and sequential vectors respectively. There are four combinations of vecscatters: PtoP, PtoS,
696: StoP and StoS. The assumption of VecScatterCreate(Vec x,IS ix,Vec y,IS iy,VecScatter *newctx) is: if x is parallel, then ix
697: contains global indices of x. If x is sequential, ix contains local indices of x. Similarly for y and iy.
699:      SF is built around the concepts of local leaves and remote roots. We treat the source vector x as roots and the destination vector y as
700:      leaves. A PtoS scatter can be naturally mapped to SF. We transform PtoP and StoP to PtoS, and treat StoS as a trivial PtoS.
701: */
703: /* NULL ix or iy in VecScatterCreate(x,ix,y,iy,newctx) has special meaning. Recover them for these cases */
704: if (!ix) {
705: if (xcommsize > 1 && ycommsize == 1) { /* PtoS: null ix means the whole x will be scattered to each seq y */
706: PetscCall(VecGetSize(x, &N));
707: PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &ix));
708: } else { /* PtoP, StoP or StoS: null ix means the whole local part of x will be scattered */
709: PetscCall(VecGetLocalSize(x, &n));
710: PetscCall(VecGetOwnershipRange(x, &xstart, NULL));
711: PetscCall(ISCreateStride(PETSC_COMM_SELF, n, xstart, 1, &ix));
712: }
713: }
715: if (!iy) {
716:     if (xcommsize == 1 && ycommsize > 1) { /* StoP: null iy means the whole y will be scattered to, from each seq x */
717: PetscCall(VecGetSize(y, &N));
718: PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &iy));
719: } else { /* PtoP, StoP or StoS: null iy means the whole local part of y will be scattered to */
720: PetscCall(VecGetLocalSize(y, &n));
721: PetscCall(VecGetOwnershipRange(y, &ystart, NULL));
722: PetscCall(ISCreateStride(PETSC_COMM_SELF, n, ystart, 1, &iy));
723: }
724: }
726:   /* Do error checking immediately after we have non-NULL ix, iy */
727: PetscCall(ISGetLocalSize(ix, &ixsize));
728: PetscCall(ISGetLocalSize(iy, &iysize));
729: PetscCall(VecGetSize(x, &xlen));
730: PetscCall(VecGetSize(y, &ylen));
731: PetscCheck(ixsize == iysize, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Scatter sizes of ix and iy don't match locally ix=%" PetscInt_FMT " iy=%" PetscInt_FMT, ixsize, iysize);
732: PetscCall(ISGetMinMax(ix, &min, &max));
733: PetscCheck(min >= 0 && max < xlen, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Scatter indices in ix are out of range: found [%" PetscInt_FMT ",%" PetscInt_FMT "), expected in [0,%" PetscInt_FMT ")", min, max, xlen);
734: PetscCall(ISGetMinMax(iy, &min, &max));
735: PetscCheck(min >= 0 && max < ylen, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Scatter indices in iy are out of range: found [%" PetscInt_FMT ",%" PetscInt_FMT "), expected in [0,%" PetscInt_FMT ")", min, max, ylen);
737: /* Extract info about ix, iy for further test */
738: PetscCall(ISGetTypeID_Private(ix, &ixid));
739: PetscCall(ISGetTypeID_Private(iy, &iyid));
740: if (ixid == IS_BLOCK) PetscCall(ISGetBlockSize(ix, &bsx));
741: else if (ixid == IS_STRIDE) PetscCall(ISStrideGetInfo(ix, &ixfirst, &ixstep));
743: if (iyid == IS_BLOCK) PetscCall(ISGetBlockSize(iy, &bsy));
744: else if (iyid == IS_STRIDE) PetscCall(ISStrideGetInfo(iy, &iyfirst, &iystep));
746:   /* Check if a PtoS scatter is a special ToAll/ToZero scatter, which can be the result of VecScatterCreateToAll/Zero.
747:      ToAll means a whole MPI vector is copied to a seq vector on every process. ToZero means a whole MPI
748:      vector is copied to a seq vector on rank 0 and the other processes do nothing (i.e., they input empty ix, iy).
750:      We can optimize these scatters with MPI collectives. We can also avoid the costly analysis used for general scatters.
751: */
752: if (xcommsize > 1 && ycommsize == 1) { /* Ranks do not diverge at this if-test */
753: PetscInt pattern[2] = {0, 0}; /* A boolean array with pattern[0] for allgather-like (ToAll) and pattern[1] for gather-like (ToZero) */
754: PetscLayout map;
756: PetscCallMPI(MPI_Comm_rank(xcomm, &rank));
757: PetscCall(VecGetLayout(x, &map));
758: if (rank == 0) {
759: if (ixid == IS_STRIDE && iyid == IS_STRIDE && ixsize == xlen && ixfirst == 0 && ixstep == 1 && iyfirst == 0 && iystep == 1) {
760: /* Rank 0 scatters the whole mpi x to seq y, so it is either a ToAll or a ToZero candidate in its view */
761: pattern[0] = pattern[1] = 1;
762: }
763: } else {
764: if (ixid == IS_STRIDE && iyid == IS_STRIDE && ixsize == xlen && ixfirst == 0 && ixstep == 1 && iyfirst == 0 && iystep == 1) {
765: /* Other ranks also scatter the whole mpi x to seq y, so it is a ToAll candidate in their view */
766: pattern[0] = 1;
767: } else if (ixsize == 0) {
768: /* Other ranks do nothing, so it is a ToZero candidate */
769: pattern[1] = 1;
770: }
771: }
773:     /* Kill two birds with one stone (the expensive allreduce): pattern[] tells if it is ToAll or ToZero */
774: PetscCall(MPIU_Allreduce(MPI_IN_PLACE, pattern, 2, MPIU_INT, MPI_LAND, xcomm));
776: if (pattern[0] || pattern[1]) {
777: PetscCall(PetscSFCreate(xcomm, &sf));
778: PetscCall(PetscSFSetFromOptions(sf));
779: PetscCall(PetscSFSetGraphWithPattern(sf, map, pattern[0] ? PETSCSF_PATTERN_ALLGATHER : PETSCSF_PATTERN_GATHER));
780: goto functionend; /* No further analysis needed. What a big win! */
781: }
782: }
784: /* Continue ...
785: Do block optimization by taking advantage of high level info available in ix, iy.
786: The block optimization is valid when all of the following conditions are met:
787: 1) ix, iy are blocked or can be blocked (i.e., strided with step=1);
788: 2) ix, iy have the same block size;
789: 3) all processors agree on one block size;
790: 4) no blocks span more than one process;
791: */
792: bigcomm = (xcommsize == 1) ? ycomm : xcomm;
794:   /* Processors could go through different paths in this if-else test */
795: m[0] = PETSC_MAX_INT;
796: m[1] = PETSC_MIN_INT;
797: if (ixid == IS_BLOCK && iyid == IS_BLOCK) {
798: m[0] = PetscMin(bsx, bsy);
799: m[1] = PetscMax(bsx, bsy);
800: } else if (ixid == IS_BLOCK && iyid == IS_STRIDE && iystep == 1 && iyfirst % bsx == 0) {
801: m[0] = bsx;
802: m[1] = bsx;
803: } else if (ixid == IS_STRIDE && iyid == IS_BLOCK && ixstep == 1 && ixfirst % bsy == 0) {
804: m[0] = bsy;
805: m[1] = bsy;
806: }
807: /* Get max and min of bsx,bsy over all processes in one allreduce */
808: PetscCall(PetscGlobalMinMaxInt(bigcomm, m, mg));
810:   /* Since we used allreduce above, all ranks will have the same min and max. min==max
811:      implies all ranks have the same bs. Do a further test to see if local vectors are divisible
812:      by bs on ALL ranks. If they are, we are assured that no block spans more than one process.
813:   */
814: if (mg[0] == mg[1] && mg[0] > 1) {
815: PetscCall(VecGetLocalSize(x, &xlen));
816: PetscCall(VecGetLocalSize(y, &ylen));
817: m[0] = xlen % mg[0];
818: m[1] = ylen % mg[0];
819: PetscCall(MPIU_Allreduce(MPI_IN_PLACE, m, 2, MPIU_INT, MPI_LOR, bigcomm));
820: if (!m[0] && !m[1]) can_do_block_opt = PETSC_TRUE;
821: }
823: /* If can_do_block_opt, then shrink x, y, ix and iy by bs to get xx, yy, ixx and iyy, whose indices
824: and layout are actually used in building SF. Suppose blocked ix representing {0,1,2,6,7,8} has
825: indices {0,2} and bs=3, then ixx = {0,2}; suppose strided iy={3,4,5,6,7,8}, then iyy={1,2}.
827: xx is a little special. If x is seq, then xx is the concatenation of seq x's on ycomm. In this way,
828: we can treat PtoP and StoP uniformly as PtoS.
829: */
830: if (can_do_block_opt) {
831: const PetscInt *indices;
833: /* Shrink x and ix */
834: bs = mg[0];
835:     PetscCall(VecCreateMPIWithArray(bigcomm, 1, xlen / bs, PETSC_DECIDE, NULL, &xx)); /* We only care about xx's layout */
836: if (ixid == IS_BLOCK) {
837: PetscCall(ISBlockGetIndices(ix, &indices));
838: PetscCall(ISBlockGetLocalSize(ix, &ixsize));
839: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ixsize, indices, PETSC_COPY_VALUES, &ixx));
840: PetscCall(ISBlockRestoreIndices(ix, &indices));
841: } else { /* ixid == IS_STRIDE */
842: PetscCall(ISGetLocalSize(ix, &ixsize));
843: PetscCall(ISCreateStride(PETSC_COMM_SELF, ixsize / bs, ixfirst / bs, 1, &ixx));
844: }
846: /* Shrink y and iy */
847: PetscCall(VecCreateMPIWithArray(ycomm, 1, ylen / bs, PETSC_DECIDE, NULL, &yy));
848: if (iyid == IS_BLOCK) {
849: PetscCall(ISBlockGetIndices(iy, &indices));
850: PetscCall(ISBlockGetLocalSize(iy, &iysize));
851: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, iysize, indices, PETSC_COPY_VALUES, &iyy));
852: PetscCall(ISBlockRestoreIndices(iy, &indices));
853: } else { /* iyid == IS_STRIDE */
854: PetscCall(ISGetLocalSize(iy, &iysize));
855: PetscCall(ISCreateStride(PETSC_COMM_SELF, iysize / bs, iyfirst / bs, 1, &iyy));
856: }
857: } else {
858: ixx = ix;
859: iyy = iy;
860: yy = y;
861: if (xcommsize == 1) PetscCall(VecCreateMPIWithArray(bigcomm, 1, xlen, PETSC_DECIDE, NULL, &xx));
862: else xx = x;
863: }
865:   /* Now we are ready to build the SF with the preprocessed (xx, yy) and (ixx, iyy) */
866: PetscCall(ISGetIndices(ixx, &xindices));
867: PetscCall(ISGetIndices(iyy, &yindices));
868: PetscCall(VecGetLayout(xx, &xlayout));
870: if (ycommsize > 1) {
871: /* PtoP or StoP */
873:     /* Below is a piece of complex code with a very simple goal: move global index pairs (xindices[i], yindices[i])
874:        to the owner process of yindices[i] according to ylayout, for i = 0..n-1.
876:        I originally did this through a temporary sf, but that design was inefficient and also distorted the log view.
877:        We want to map one VecScatterCreate() call to one PetscSFCreate() call. The old design mapped to three
878:        PetscSFCreate() calls. This code is on the critical path of VecScatterSetUp() and is used by every VecScatterCreate().
879:        So I commented it out and did another, optimized implementation. The commented code is left here for reference.
880: */
881: #if 0
882: const PetscInt *degree;
883: PetscSF tmpsf;
884: PetscInt inedges=0,*leafdata,*rootdata;
886: PetscCall(VecGetOwnershipRange(xx,&xstart,NULL));
887: PetscCall(VecGetLayout(yy,&ylayout));
888: PetscCall(VecGetOwnershipRange(yy,&ystart,NULL));
890: PetscCall(VecGetLocalSize(yy,&nroots));
891: PetscCall(ISGetLocalSize(iyy,&nleaves));
892: PetscCall(PetscMalloc2(nleaves,&iremote,nleaves*2,&leafdata));
894: for (i=0; i<nleaves; i++) {
895: PetscCall(PetscLayoutFindOwnerIndex(ylayout,yindices[i],&iremote[i].rank,&iremote[i].index));
896: leafdata[2*i] = yindices[i];
897: leafdata[2*i+1] = (xcommsize > 1)? xindices[i] : xindices[i] + xstart;
898: }
900: PetscCall(PetscSFCreate(ycomm,&tmpsf));
901: PetscCall(PetscSFSetGraph(tmpsf,nroots,nleaves,NULL,PETSC_USE_POINTER,iremote,PETSC_USE_POINTER));
903:     PetscCall(PetscSFComputeDegreeBegin(tmpsf,&degree));
904:     PetscCall(PetscSFComputeDegreeEnd(tmpsf,&degree));
906: for (i=0; i<nroots; i++) inedges += degree[i];
907: PetscCall(PetscMalloc1(inedges*2,&rootdata));
908: PetscCall(PetscSFGatherBegin(tmpsf,MPIU_2INT,leafdata,rootdata));
909: PetscCall(PetscSFGatherEnd(tmpsf,MPIU_2INT,leafdata,rootdata));
911: PetscCall(PetscFree2(iremote,leafdata));
912: PetscCall(PetscSFDestroy(&tmpsf));
914: /* rootdata contains global index pairs (i, j). j's are owned by the current process, but i's can point to anywhere.
915: We convert j to local, and convert i to (rank, index). In the end, we get an PtoS suitable for building SF.
916: */
917: nleaves = inedges;
918: PetscCall(VecGetLocalSize(xx,&nroots));
919: PetscCall(PetscMalloc1(nleaves,&ilocal));
920: PetscCall(PetscMalloc1(nleaves,&iremote));
922: for (i=0; i<inedges; i++) {
923: ilocal[i] = rootdata[2*i] - ystart; /* convert y's global index to local index */
924: PetscCall(PetscLayoutFindOwnerIndex(xlayout,rootdata[2*i+1],&iremote[i].rank,&iremote[i].index)); /* convert x's global index to (rank, index) */
925: }
926: PetscCall(PetscFree(rootdata));
927: #else
928: PetscInt j, k, n, disp, rlentotal, *sstart, *xindices_sorted, *yindices_sorted;
929: const PetscInt *yrange;
930: PetscMPIInt nsend, nrecv, nreq, yrank, *sendto, *recvfrom, tag1, tag2;
931: PetscInt *slens, *rlens, count;
932: PetscInt *rxindices, *ryindices;
933: MPI_Request *reqs, *sreqs, *rreqs;
935:     /* Sorting makes the code simpler and faster, and also helps get rid of many O(P) arrays, which hurt scalability at large scale
936:        yindices_sorted - sorted yindices
937:        xindices_sorted - xindices sorted along with yindices
938:     */
939: PetscCall(ISGetLocalSize(ixx, &n)); /*ixx, iyy have the same local size */
940: PetscCall(PetscMalloc2(n, &xindices_sorted, n, &yindices_sorted));
941: PetscCall(PetscArraycpy(xindices_sorted, xindices, n));
942: PetscCall(PetscArraycpy(yindices_sorted, yindices, n));
943: PetscCall(PetscSortIntWithArray(n, yindices_sorted, xindices_sorted));
944: PetscCall(VecGetOwnershipRange(xx, &xstart, NULL));
945: if (xcommsize == 1) {
946: for (i = 0; i < n; i++) xindices_sorted[i] += xstart;
947: } /* Convert to global indices */
949: /*
950: Calculate info about messages I need to send
951: nsend - number of non-empty messages to send
952: sendto - [nsend] ranks I will send messages to
953:        sstart  - [nsend+1] sstart[i] is the start index in xindices_sorted[] I send to rank sendto[i]
954: slens - [ycommsize] I want to send slens[i] entries to rank i.
955: */
956: PetscCall(VecGetLayout(yy, &ylayout));
957: PetscCall(PetscLayoutGetRanges(ylayout, &yrange));
958: PetscCall(PetscCalloc1(ycommsize, &slens)); /* The only O(P) array in this algorithm */
960: i = j = nsend = 0;
961: while (i < n) {
962: if (yindices_sorted[i] >= yrange[j + 1]) { /* If i-th index is out of rank j's bound */
963: do {
964: j++;
965: } while (yindices_sorted[i] >= yrange[j + 1] && j < ycommsize); /* Increase j until i-th index falls in rank j's bound */
966: PetscCheck(j != ycommsize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Index %" PetscInt_FMT " not owned by any process, upper bound %" PetscInt_FMT, yindices_sorted[i], yrange[ycommsize]);
967: }
968: i++;
969: if (!slens[j]++) nsend++;
970: }
972: PetscCall(PetscMalloc2(nsend + 1, &sstart, nsend, &sendto));
974: sstart[0] = 0;
975: for (i = j = 0; i < ycommsize; i++) {
976: if (slens[i]) {
977: sendto[j] = (PetscMPIInt)i;
978: sstart[j + 1] = sstart[j] + slens[i];
979: j++;
980: }
981: }
983: /*
984: Calculate the reverse info about messages I will recv
985: nrecv - number of messages I will recv
986: recvfrom - [nrecv] ranks I recv from
987: rlens - [nrecv] I will recv rlens[i] entries from rank recvfrom[i]
988: rlentotal - sum of rlens[]
989: rxindices - [rlentotal] recv buffer for xindices_sorted
990: ryindices - [rlentotal] recv buffer for yindices_sorted
991: */
992: PetscCall(PetscGatherNumberOfMessages_Private(ycomm, NULL, slens, &nrecv));
993: PetscCall(PetscGatherMessageLengths_Private(ycomm, nsend, nrecv, slens, &recvfrom, &rlens));
994: PetscCall(PetscFree(slens)); /* Free the O(P) array ASAP */
995: rlentotal = 0;
996: for (i = 0; i < nrecv; i++) rlentotal += rlens[i];
998: /*
999: Communicate with processors in recvfrom[] to populate rxindices and ryindices
1000: */
1001: PetscCall(PetscCommGetNewTag(ycomm, &tag1));
1002: PetscCall(PetscCommGetNewTag(ycomm, &tag2));
1003: PetscCall(PetscMalloc2(rlentotal, &rxindices, rlentotal, &ryindices));
1004: PetscCall(PetscMPIIntCast((nsend + nrecv) * 2, &nreq));
1005: PetscCall(PetscMalloc1(nreq, &reqs));
1006: sreqs = reqs;
1007: rreqs = PetscSafePointerPlusOffset(reqs, nsend * 2);
1009: for (i = disp = 0; i < nrecv; i++) {
1010: count = rlens[i];
1011: PetscCallMPI(MPIU_Irecv(rxindices + disp, count, MPIU_INT, recvfrom[i], tag1, ycomm, rreqs + i));
1012: PetscCallMPI(MPIU_Irecv(ryindices + disp, count, MPIU_INT, recvfrom[i], tag2, ycomm, rreqs + nrecv + i));
1013: disp += rlens[i];
1014: }
1016: for (i = 0; i < nsend; i++) {
1017: count = sstart[i + 1] - sstart[i];
1018: PetscCallMPI(MPIU_Isend(xindices_sorted + sstart[i], count, MPIU_INT, sendto[i], tag1, ycomm, sreqs + i));
1019: PetscCallMPI(MPIU_Isend(yindices_sorted + sstart[i], count, MPIU_INT, sendto[i], tag2, ycomm, sreqs + nsend + i));
1020: }
1021:     PetscCallMPI(MPI_Waitall(nreq, reqs, MPI_STATUSES_IGNORE));
1023: /* Transform VecScatter into SF */
1024: nleaves = rlentotal;
1025: PetscCall(PetscMalloc1(nleaves, &ilocal));
1026: PetscCall(PetscMalloc1(nleaves, &iremote));
1027: PetscCallMPI(MPI_Comm_rank(ycomm, &yrank));
1028: for (i = disp = 0; i < nrecv; i++) {
1029: for (j = 0; j < rlens[i]; j++) {
1030: k = disp + j; /* k-th index pair */
1031: ilocal[k] = ryindices[k] - yrange[yrank]; /* Convert y's global index to local index */
1032: PetscCall(PetscLayoutFindOwnerIndex(xlayout, rxindices[k], &rank, &iremote[k].index)); /* Convert x's global index to (rank, index) */
1033: iremote[k].rank = rank;
1034: }
1035: disp += rlens[i];
1036: }
1038: PetscCall(PetscFree2(sstart, sendto));
1039: PetscCall(PetscFree(rlens));
1040: PetscCall(PetscFree(recvfrom));
1041: PetscCall(PetscFree(reqs));
1042: PetscCall(PetscFree2(rxindices, ryindices));
1043: PetscCall(PetscFree2(xindices_sorted, yindices_sorted));
1044: #endif
1045: } else {
1046: /* PtoS or StoS */
1047: PetscCall(ISGetLocalSize(iyy, &nleaves));
1048: PetscCall(PetscMalloc1(nleaves, &ilocal));
1049: PetscCall(PetscMalloc1(nleaves, &iremote));
1050: PetscCall(PetscArraycpy(ilocal, yindices, nleaves));
1051: for (i = 0; i < nleaves; i++) {
1052: PetscCall(PetscLayoutFindOwnerIndex(xlayout, xindices[i], &rank, &iremote[i].index));
1053: iremote[i].rank = rank;
1054: }
1055: }
1057: /* MUST build SF on xx's comm, which is not necessarily identical to yy's comm.
1058: In SF's view, xx contains the roots (i.e., the remote) and iremote[].rank are ranks in xx's comm.
1059:      yy contains leaves, which are local and can be thought of as part of PETSC_COMM_SELF. */
1060: PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)xx), &sf));
1061: sf->allow_multi_leaves = PETSC_TRUE;
1062: PetscCall(PetscSFSetFromOptions(sf));
1063: PetscCall(VecGetLocalSize(xx, &nroots));
1064:   PetscCall(PetscSFSetGraph(sf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER)); /* Give ilocal/iremote to PETSc; no need to free them here */
1066: /* Free memory no longer needed */
1067: PetscCall(ISRestoreIndices(ixx, &xindices));
1068: PetscCall(ISRestoreIndices(iyy, &yindices));
1069: if (can_do_block_opt) {
1070: PetscCall(VecDestroy(&xx));
1071: PetscCall(VecDestroy(&yy));
1072: PetscCall(ISDestroy(&ixx));
1073: PetscCall(ISDestroy(&iyy));
1074: } else if (xcommsize == 1) {
1075: PetscCall(VecDestroy(&xx));
1076: }
1078: functionend:
1079: sf->vscat.bs = bs;
1080: if (sf->vscat.bs > 1) {
1081: PetscCallMPI(MPI_Type_contiguous(sf->vscat.bs, MPIU_SCALAR, &sf->vscat.unit));
1082: PetscCallMPI(MPI_Type_commit(&sf->vscat.unit));
1083: } else {
1084: sf->vscat.unit = MPIU_SCALAR;
1085: }
1086: PetscCall(VecGetLocalSize(x, &sf->vscat.from_n));
1087: PetscCall(VecGetLocalSize(y, &sf->vscat.to_n));
1088: if (!ix_old) PetscCall(ISDestroy(&ix)); /* We created helper ix, iy. Free them */
1089: if (!iy_old) PetscCall(ISDestroy(&iy));
1091: /* Set default */
1092: PetscCall(VecScatterSetFromOptions(sf));
1093: PetscCall(PetscSFSetUp(sf));
1095: *newsf = sf;
1096: PetscFunctionReturn(PETSC_SUCCESS);
1097: }
1099: /*@C
1100: VecScatterCreateToAll - Creates a vector and a scatter context that copies all
1101: vector values to each processor
1103: Collective
1105: Input Parameter:
1106: . vin - an `MPIVEC`
1108: Output Parameters:
1109: + ctx - scatter context
1110: - vout - output `SEQVEC` that is large enough to scatter into
1112: Level: intermediate
1114: Example Usage:
1115: .vb
1116: VecScatterCreateToAll(vin, &ctx, &vout);
1118: // scatter as many times as you need
1119: VecScatterBegin(ctx, vin, vout, INSERT_VALUES, SCATTER_FORWARD);
1120: VecScatterEnd(ctx, vin, vout, INSERT_VALUES, SCATTER_FORWARD);
1122: // destroy scatter context and local vector when no longer needed
1123: VecScatterDestroy(&ctx);
1124: VecDestroy(&vout);
1125: .ve
1127: Notes:
1128: `vout` may be `NULL` [`PETSC_NULL_VEC` from Fortran] if you do not
1129: need to have it created
1131: Do NOT create a vector and then pass it in as the final argument `vout`! `vout` is created by this routine
1132: automatically (unless you pass `NULL` in for that argument if you do not need it).
1134: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterCreateToZero()`, `VecScatterBegin()`, `VecScatterEnd()`
1135: @*/
1136: PetscErrorCode VecScatterCreateToAll(Vec vin, VecScatter *ctx, Vec *vout)
1137: {
1138: PetscInt N;
1139: IS is;
1140: Vec tmp;
1141: Vec *tmpv;
1142: PetscBool tmpvout = PETSC_FALSE;
1143: VecType roottype;
1145: PetscFunctionBegin;
1148: PetscAssertPointer(ctx, 2);
1149: if (vout) {
1150: PetscAssertPointer(vout, 3);
1151: tmpv = vout;
1152: } else {
1153: tmpvout = PETSC_TRUE;
1154: tmpv = &tmp;
1155: }
1157:   /* Create a seq vec on each proc, with the same size as the original vec */
1158: PetscCall(VecGetSize(vin, &N));
1159: PetscCall(VecGetRootType_Private(vin, &roottype));
1160: PetscCall(VecCreate(PETSC_COMM_SELF, tmpv));
1161: PetscCall(VecSetSizes(*tmpv, N, PETSC_DECIDE));
1162: PetscCall(VecSetType(*tmpv, roottype));
1163: /* Create the VecScatter ctx with the communication info */
1164: PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &is));
1165: PetscCall(VecScatterCreate(vin, is, *tmpv, is, ctx));
1166: PetscCall(ISDestroy(&is));
1167: if (tmpvout) PetscCall(VecDestroy(tmpv));
1168: PetscFunctionReturn(PETSC_SUCCESS);
1169: }
1171: /*@C
1172: VecScatterCreateToZero - Creates an output vector and a scatter context used to
1173: copy all vector values into the output vector on the zeroth processor
1175: Collective
1177: Input Parameter:
1178: . vin - `Vec` of type `MPIVEC`
1180: Output Parameters:
1181: + ctx - scatter context
1182: - vout - output `SEQVEC` that is large enough to scatter into on processor 0 and
1183: of length zero on all other processors
1185: Level: intermediate
1187: Example Usage:
1188: .vb
1189: VecScatterCreateToZero(vin, &ctx, &vout);
1191: // scatter as many times as you need
1192: VecScatterBegin(ctx, vin, vout, INSERT_VALUES, SCATTER_FORWARD);
1193: VecScatterEnd(ctx, vin, vout, INSERT_VALUES, SCATTER_FORWARD);
1195: // destroy scatter context and local vector when no longer needed
1196: VecScatterDestroy(&ctx);
1197: VecDestroy(&vout);
1198: .ve
1200: Notes:
1201: vout may be `NULL` [`PETSC_NULL_VEC` from Fortran] if you do not
1202: need to have it created
1204: Do NOT create a vector and then pass it in as the final argument `vout`! `vout` is created by this routine
1205: automatically (unless you pass `NULL` in for that argument if you do not need it).
1207: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterCreateToAll()`, `VecScatterBegin()`, `VecScatterEnd()`
1208: @*/
1209: PetscErrorCode VecScatterCreateToZero(Vec vin, VecScatter *ctx, Vec *vout)
1210: {
1211: PetscInt N;
1212: PetscMPIInt rank;
1213: IS is;
1214: Vec tmp;
1215: Vec *tmpv;
1216: PetscBool tmpvout = PETSC_FALSE;
1217: VecType roottype;
1219: PetscFunctionBegin;
1222: PetscAssertPointer(ctx, 2);
1223: if (vout) {
1224: PetscAssertPointer(vout, 3);
1225: tmpv = vout;
1226: } else {
1227: tmpvout = PETSC_TRUE;
1228: tmpv = &tmp;
1229: }
1231:   /* Create a vec on each proc; on process 0 it has the same size as the original vec, elsewhere it has length zero */
1232: PetscCall(VecGetSize(vin, &N));
1233: PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)vin), &rank));
1234: if (rank) N = 0;
1235: PetscCall(VecGetRootType_Private(vin, &roottype));
1236: PetscCall(VecCreate(PETSC_COMM_SELF, tmpv));
1237: PetscCall(VecSetSizes(*tmpv, N, PETSC_DECIDE));
1238: PetscCall(VecSetType(*tmpv, roottype));
1239: /* Create the VecScatter ctx with the communication info */
1240: PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &is));
1241: PetscCall(VecScatterCreate(vin, is, *tmpv, is, ctx));
1242: PetscCall(ISDestroy(&is));
1243: if (tmpvout) PetscCall(VecDestroy(tmpv));
1244: PetscFunctionReturn(PETSC_SUCCESS);
1245: }
1247: /*@
1248: VecScatterBegin - Begins a generalized scatter from one vector to
1249: another. Complete the scattering phase with `VecScatterEnd()`.
1251: Neighbor-wise Collective
1253: Input Parameters:
1254: + sf - scatter context generated by `VecScatterCreate()`
1255: . x - the vector from which we scatter
1256: . y - the vector to which we scatter
1257: . addv - either `ADD_VALUES`, `MAX_VALUES`, `MIN_VALUES` or `INSERT_VALUES`, with `INSERT_VALUES` mode any location
1258: not scattered to retains its old value; i.e. the vector is NOT first zeroed.
1259: - mode - the scattering mode, usually `SCATTER_FORWARD`. The available modes are: `SCATTER_FORWARD` or `SCATTER_REVERSE`
1261: Level: intermediate
1263: Notes:
1264: The vectors `x` and `y` need not be the same vectors used in the call
1265: to `VecScatterCreate()`, but `x` must have the same parallel data layout
1266: as that passed in as the `x` to `VecScatterCreate()`, similarly for the `y`.
1267: Most likely they have been obtained from `VecDuplicate()`.
1269: You cannot change the values in the input vector between the calls to `VecScatterBegin()`
1270: and `VecScatterEnd()`.
1272:   If you use `SCATTER_REVERSE` the two arguments `x` and `y` should be reversed, relative to
1273:   the `SCATTER_FORWARD` case.
1275: y[iy[i]] = x[ix[i]], for i=0,...,ni-1
1277: This scatter is far more general than the conventional
1278: scatter, since it can be a gather or a scatter or a combination,
1279: depending on the indices ix and iy. If x is a parallel vector and y
1280: is sequential, `VecScatterBegin()` can serve to gather values to a
1281: single processor. Similarly, if `y` is parallel and `x` sequential, the
1282: routine can scatter from one processor to many processors.
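   Example Usage:
   A sketch pairing a forward scatter with a reverse accumulation using the same context; in the
   reverse calls the vector arguments are swapped and the values are added back into `x`:
.vb
   PetscCall(VecScatterBegin(sf, x, y, INSERT_VALUES, SCATTER_FORWARD));
   PetscCall(VecScatterEnd(sf, x, y, INSERT_VALUES, SCATTER_FORWARD));

   PetscCall(VecScatterBegin(sf, y, x, ADD_VALUES, SCATTER_REVERSE));
   PetscCall(VecScatterEnd(sf, y, x, ADD_VALUES, SCATTER_REVERSE));
.ve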
1284: .seealso: [](sec_scatter), `VecScatter`, `VecScatterCreate()`, `VecScatterEnd()`
1285: @*/
1286: PetscErrorCode VecScatterBegin(VecScatter sf, Vec x, Vec y, InsertMode addv, ScatterMode mode)
1287: {
1288: PetscInt to_n, from_n;
1290: PetscFunctionBegin;
1294: if (PetscDefined(USE_DEBUG)) {
1295: /*
1296: Error checking to make sure these vectors match the vectors used
1297:      to create the vector scatter context. -1 in from_n and to_n indicates the
1298:      vector lengths are unknown (for example with mapped scatters) and thus
1299:      no error checking is performed.
1300: */
1301: if (sf->vscat.from_n >= 0 && sf->vscat.to_n >= 0) {
1302: PetscCall(VecGetLocalSize(x, &from_n));
1303: PetscCall(VecGetLocalSize(y, &to_n));
1304: if (mode & SCATTER_REVERSE) {
1305: PetscCheck(to_n == sf->vscat.from_n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Vector wrong size %" PetscInt_FMT " for scatter %" PetscInt_FMT " (scatter reverse and vector to != sf from size)", to_n, sf->vscat.from_n);
1306: PetscCheck(from_n == sf->vscat.to_n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Vector wrong size %" PetscInt_FMT " for scatter %" PetscInt_FMT " (scatter reverse and vector from != sf to size)", from_n, sf->vscat.to_n);
1307: } else {
1308: PetscCheck(to_n == sf->vscat.to_n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Vector wrong size %" PetscInt_FMT " for scatter %" PetscInt_FMT " (scatter forward and vector to != sf to size)", to_n, sf->vscat.to_n);
1309: PetscCheck(from_n == sf->vscat.from_n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Vector wrong size %" PetscInt_FMT " for scatter %" PetscInt_FMT " (scatter forward and vector from != sf from size)", from_n, sf->vscat.from_n);
1310: }
1311: }
1312: }
1314: sf->vscat.logging = PETSC_TRUE;
1315: PetscCall(PetscLogEventBegin(VEC_ScatterBegin, sf, x, y, 0));
1316: PetscCall(VecScatterBegin_Internal(sf, x, y, addv, mode));
1317: if (sf->vscat.beginandendtogether) PetscCall(VecScatterEnd_Internal(sf, x, y, addv, mode));
1318: PetscCall(PetscLogEventEnd(VEC_ScatterBegin, sf, x, y, 0));
1319: sf->vscat.logging = PETSC_FALSE;
1320: PetscFunctionReturn(PETSC_SUCCESS);
1321: }
1323: /*@
1324: VecScatterEnd - Ends a generalized scatter from one vector to another. Call
1325: after first calling `VecScatterBegin()`.
1327: Neighbor-wise Collective
1329: Input Parameters:
1330: + sf - scatter context generated by `VecScatterCreate()`
1331: . x - the vector from which we scatter
1332: . y - the vector to which we scatter
1333: . addv - one of `ADD_VALUES`, `MAX_VALUES`, `MIN_VALUES` or `INSERT_VALUES`
1334: - mode - the scattering mode, usually `SCATTER_FORWARD`. The available modes are: `SCATTER_FORWARD`, `SCATTER_REVERSE`
1336: Level: intermediate
1338: Notes:
1339:   If you use `SCATTER_REVERSE` the arguments `x` and `y` should be reversed, relative to the `SCATTER_FORWARD` case.
1341: y[iy[i]] = x[ix[i]], for i=0,...,ni-1
1343: .seealso: [](sec_scatter), `VecScatter`, `VecScatterBegin()`, `VecScatterCreate()`
1344: @*/
1345: PetscErrorCode VecScatterEnd(VecScatter sf, Vec x, Vec y, InsertMode addv, ScatterMode mode)
1346: {
1347: PetscFunctionBegin;
1351: if (!sf->vscat.beginandendtogether) {
1352: sf->vscat.logging = PETSC_TRUE;
1353: PetscCall(PetscLogEventBegin(VEC_ScatterEnd, sf, x, y, 0));
1354: PetscCall(VecScatterEnd_Internal(sf, x, y, addv, mode));
1355: PetscCall(PetscLogEventEnd(VEC_ScatterEnd, sf, x, y, 0));
1356: sf->vscat.logging = PETSC_FALSE;
1357: }
1358: PetscFunctionReturn(PETSC_SUCCESS);
1359: }