Actual source code: pbvec.c

petsc-3.14.6 2021-03-30

  2: /*
  3:    This file contains routines for Parallel vector operations.
  4:  */
  5: #include <petscsys.h>
  6: #include <../src/vec/vec/impls/mpi/pvecimpl.h>

  8: PetscErrorCode VecDot_MPI(Vec xin,Vec yin,PetscScalar *z)
  9: {
 10:   PetscScalar    sum,work;

 14:   VecDot_Seq(xin,yin,&work);
 15:   MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
 16:   *z   = sum;
 17:   return(0);
 18: }

 20: PetscErrorCode VecTDot_MPI(Vec xin,Vec yin,PetscScalar *z)
 21: {
 22:   PetscScalar    sum,work;

 26:   VecTDot_Seq(xin,yin,&work);
 27:   MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
 28:   *z   = sum;
 29:   return(0);
 30: }

 32: extern PetscErrorCode VecView_MPI_Draw(Vec,PetscViewer);

 34: static PetscErrorCode VecPlaceArray_MPI(Vec vin,const PetscScalar *a)
 35: {
 37:   Vec_MPI        *v = (Vec_MPI*)vin->data;

 40:   if (v->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecPlaceArray() was already called on this vector, without a call to VecResetArray()");
 41:   v->unplacedarray = v->array;  /* save previous array so reset can bring it back */
 42:   v->array         = (PetscScalar*)a;
 43:   if (v->localrep) {
 44:     VecPlaceArray(v->localrep,a);
 45:   }
 46:   return(0);
 47: }
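/*
   A minimal caller-side sketch (hypothetical application code, not part of this file) of how
   VecPlaceArray()/VecResetArray() drive VecPlaceArray_MPI() and VecResetArray_MPI() above.
   The array name tmp is illustrative only; declarations of X and nlocal and error checking are
   omitted, as elsewhere in this listing.

     PetscScalar *tmp;
     VecGetLocalSize(X,&nlocal);
     PetscMalloc1(nlocal,&tmp);
     VecPlaceArray(X,tmp);        -  X now computes on tmp; the previous storage is kept in unplacedarray
     ...                          -  use X
     VecResetArray(X);            -  restores the storage saved by VecPlaceArray_MPI()
     PetscFree(tmp);
*/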

 49: PetscErrorCode VecDuplicate_MPI(Vec win,Vec *v)
 50: {
 52:   Vec_MPI        *vw,*w = (Vec_MPI*)win->data;
 53:   PetscScalar    *array;

 56:   VecCreate(PetscObjectComm((PetscObject)win),v);
 57:   PetscLayoutReference(win->map,&(*v)->map);

 59:   VecCreate_MPI_Private(*v,PETSC_TRUE,w->nghost,NULL);
 60:   vw   = (Vec_MPI*)(*v)->data;
 61:   PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));

 63:   /* save local representation of the parallel vector (and scatter) if it exists */
 64:   if (w->localrep) {
 65:     VecGetArray(*v,&array);
 66:     VecCreateSeqWithArray(PETSC_COMM_SELF,PetscAbs(win->map->bs),win->map->n+w->nghost,array,&vw->localrep);
 67:     PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));
 68:     VecRestoreArray(*v,&array);
 69:     PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);

 71:     vw->localupdate = w->localupdate;
 72:     if (vw->localupdate) {
 73:       PetscObjectReference((PetscObject)vw->localupdate);
 74:     }
 75:   }

 77:   /* New vector should inherit stashing property of parent */
 78:   (*v)->stash.donotstash   = win->stash.donotstash;
 79:   (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

 81:   PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);
 82:   PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);

 84:   (*v)->map->bs   = PetscAbs(win->map->bs);
 85:   (*v)->bstash.bs = win->bstash.bs;
 86:   return(0);
 87: }


 90: static PetscErrorCode VecSetOption_MPI(Vec V,VecOption op,PetscBool flag)
 91: {
 92:   Vec_MPI        *v = (Vec_MPI*)V->data;

 96:   switch (op) {
 97:   case VEC_IGNORE_OFF_PROC_ENTRIES: V->stash.donotstash = flag;
 98:     break;
 99:   case VEC_IGNORE_NEGATIVE_INDICES: V->stash.ignorenegidx = flag;
100:     break;
101:   case VEC_SUBSET_OFF_PROC_ENTRIES:
102:     v->assembly_subset = flag; /* See the same logic in MatAssembly wrt MAT_SUBSET_OFF_PROC_ENTRIES */
103:     if (!v->assembly_subset) { /* User indicates "do not reuse the communication pattern" */
104:       VecAssemblyReset_MPI(V); /* Reset existing pattern to free memory */
105:       v->first_assembly_done = PETSC_FALSE; /* Mark that the first assembly has not been done */
106:     }
107:     break;
108:   }

110:   return(0);
111: }
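/*
   A minimal sketch (hypothetical caller code, not part of this file) of setting the options
   handled by VecSetOption_MPI() above before assembling X; the flags shown are illustrative only.

     VecSetOption(X,VEC_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);   -  drop entries destined for other ranks instead of stashing them
     VecSetOption(X,VEC_IGNORE_NEGATIVE_INDICES,PETSC_TRUE);   -  silently skip entries with negative indices
     VecSetOption(X,VEC_SUBSET_OFF_PROC_ENTRIES,PETSC_TRUE);   -  later assemblies communicate with a subset of the ranks
                                                                  used in the first assembly, so the pattern can be reused
*/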


114: static PetscErrorCode VecResetArray_MPI(Vec vin)
115: {
116:   Vec_MPI        *v = (Vec_MPI*)vin->data;

120:   v->array         = v->unplacedarray;
121:   v->unplacedarray = NULL;
122:   if (v->localrep) {
123:     VecResetArray(v->localrep);
124:   }
125:   return(0);
126: }

128: static PetscErrorCode VecAssemblySend_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rankid,PetscMPIInt rank,void *sdata,MPI_Request req[],void *ctx)
129: {
130:   Vec X = (Vec)ctx;
131:   Vec_MPI *x = (Vec_MPI*)X->data;
132:   VecAssemblyHeader *hdr = (VecAssemblyHeader*)sdata;
133:   PetscInt bs = X->map->bs;

137:   /* x->first_assembly_done indicates we are reusing the communication pattern of a previous
138:      assembly. In that case some messages can be empty, but if a message was sent before it must
139:      be sent again, because the receiver has already posted a matching receive for it.
140:    */
141:   if (hdr->count || (x->first_assembly_done && x->sendptrs[rankid].ints)) {
142:     MPI_Isend(x->sendptrs[rankid].ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
143:     MPI_Isend(x->sendptrs[rankid].scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
144:   }
145:   if (hdr->bcount || (x->first_assembly_done && x->sendptrs[rankid].intb)) {
146:     MPI_Isend(x->sendptrs[rankid].intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
147:     MPI_Isend(x->sendptrs[rankid].scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
148:   }
149:   return(0);
150: }

152: static PetscErrorCode VecAssemblyRecv_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rank,void *rdata,MPI_Request req[],void *ctx)
153: {
154:   Vec X = (Vec)ctx;
155:   Vec_MPI *x = (Vec_MPI*)X->data;
156:   VecAssemblyHeader *hdr = (VecAssemblyHeader*)rdata;
158:   PetscInt bs = X->map->bs;
159:   VecAssemblyFrame *frame;

162:   PetscSegBufferGet(x->segrecvframe,1,&frame);

164:   if (hdr->count) {
165:     PetscSegBufferGet(x->segrecvint,hdr->count,&frame->ints);
166:     MPI_Irecv(frame->ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
167:     PetscSegBufferGet(x->segrecvscalar,hdr->count,&frame->scalars);
168:     MPI_Irecv(frame->scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
169:     frame->pendings = 2;
170:   } else {
171:     frame->ints = NULL;
172:     frame->scalars = NULL;
173:     frame->pendings = 0;
174:   }

176:   if (hdr->bcount) {
177:     PetscSegBufferGet(x->segrecvint,hdr->bcount,&frame->intb);
178:     MPI_Irecv(frame->intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
179:     PetscSegBufferGet(x->segrecvscalar,hdr->bcount*bs,&frame->scalarb);
180:     MPI_Irecv(frame->scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
181:     frame->pendingb = 2;
182:   } else {
183:     frame->intb = NULL;
184:     frame->scalarb = NULL;
185:     frame->pendingb = 0;
186:   }
187:   return(0);
188: }

190: static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
191: {
192:   Vec_MPI        *x = (Vec_MPI*)X->data;
194:   MPI_Comm       comm;
195:   PetscInt       i,j,jb,bs;

198:   if (X->stash.donotstash) return(0);

200:   PetscObjectGetComm((PetscObject)X,&comm);
201:   VecGetBlockSize(X,&bs);
202:   if (PetscDefined(USE_DEBUG)) {
203:     InsertMode addv;
204:     MPIU_Allreduce((PetscEnum*)&X->stash.insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);
205:     if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(comm,PETSC_ERR_ARG_NOTSAMETYPE,"Some processors inserted values while others added");
206:   }
207:   X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */

209:   VecStashSortCompress_Private(&X->stash);
210:   VecStashSortCompress_Private(&X->bstash);

212:   if (!x->sendranks) {
213:     PetscMPIInt nowners,bnowners,*owners,*bowners;
214:     PetscInt ntmp;
215:     VecStashGetOwnerList_Private(&X->stash,X->map,&nowners,&owners);
216:     VecStashGetOwnerList_Private(&X->bstash,X->map,&bnowners,&bowners);
217:     PetscMergeMPIIntArray(nowners,owners,bnowners,bowners,&ntmp,&x->sendranks);
218:     x->nsendranks = ntmp;
219:     PetscFree(owners);
220:     PetscFree(bowners);
221:     PetscMalloc1(x->nsendranks,&x->sendhdr);
222:     PetscCalloc1(x->nsendranks,&x->sendptrs);
223:   }
224:   for (i=0,j=0,jb=0; i<x->nsendranks; i++) {
225:     PetscMPIInt rank = x->sendranks[i];
226:     x->sendhdr[i].insertmode = X->stash.insertmode;
227:     /* Initialize pointers for non-empty stashes the first time around.  Subsequent assemblies with
228:      * VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
229:      * there is nothing new to send, so that size-zero messages get sent instead. */
230:     x->sendhdr[i].count = 0;
231:     if (X->stash.n) {
232:       x->sendptrs[i].ints    = &X->stash.idx[j];
233:       x->sendptrs[i].scalars = &X->stash.array[j];
234:       for (; j<X->stash.n && X->stash.idx[j] < X->map->range[rank+1]; j++) x->sendhdr[i].count++;
235:     }
236:     x->sendhdr[i].bcount = 0;
237:     if (X->bstash.n) {
238:       x->sendptrs[i].intb    = &X->bstash.idx[jb];
239:       x->sendptrs[i].scalarb = &X->bstash.array[jb*bs];
240:       for (; jb<X->bstash.n && X->bstash.idx[jb]*bs < X->map->range[rank+1]; jb++) x->sendhdr[i].bcount++;
241:     }
242:   }

244:   if (!x->segrecvint) {PetscSegBufferCreate(sizeof(PetscInt),1000,&x->segrecvint);}
245:   if (!x->segrecvscalar) {PetscSegBufferCreate(sizeof(PetscScalar),1000,&x->segrecvscalar);}
246:   if (!x->segrecvframe) {PetscSegBufferCreate(sizeof(VecAssemblyFrame),50,&x->segrecvframe);}
247:   if (x->first_assembly_done) { /* this is not the first assembly */
248:     PetscMPIInt tag[4];
249:     for (i=0; i<4; i++) {PetscCommGetNewTag(comm,&tag[i]);}
250:     for (i=0; i<x->nsendranks; i++) {
251:       VecAssemblySend_MPI_Private(comm,tag,i,x->sendranks[i],x->sendhdr+i,x->sendreqs+4*i,X);
252:     }
253:     for (i=0; i<x->nrecvranks; i++) {
254:       VecAssemblyRecv_MPI_Private(comm,tag,x->recvranks[i],x->recvhdr+i,x->recvreqs+4*i,X);
255:     }
256:     x->use_status = PETSC_TRUE;
257:   } else { /* First time assembly */
258:     PetscCommBuildTwoSidedFReq(comm,3,MPIU_INT,x->nsendranks,x->sendranks,(PetscInt*)x->sendhdr,&x->nrecvranks,&x->recvranks,&x->recvhdr,4,&x->sendreqs,&x->recvreqs,VecAssemblySend_MPI_Private,VecAssemblyRecv_MPI_Private,X);
259:     x->use_status = PETSC_FALSE;
260:   }

262:   /* The first_assembly_done flag is only meaningful when x->assembly_subset is set.
263:      When assembly_subset is set, mark the first assembly as done so that later assemblies can reuse the communication pattern.
264:    */
265:   x->first_assembly_done = x->assembly_subset;

267:   {
268:     PetscInt nstash,reallocs;
269:     VecStashGetInfo_Private(&X->stash,&nstash,&reallocs);
270:     PetscInfo2(X,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
271:     VecStashGetInfo_Private(&X->bstash,&nstash,&reallocs);
272:     PetscInfo2(X,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
273:   }
274:   return(0);
275: }

277: static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X)
278: {
279:   Vec_MPI *x = (Vec_MPI*)X->data;
280:   PetscInt bs = X->map->bs;
281:   PetscMPIInt npending,*some_indices,r;
282:   MPI_Status  *some_statuses;
283:   PetscScalar *xarray;
285:   VecAssemblyFrame *frame;

288:   if (X->stash.donotstash) {
289:     X->stash.insertmode = NOT_SET_VALUES;
290:     X->bstash.insertmode = NOT_SET_VALUES;
291:     return(0);
292:   }

294:   if (!x->segrecvframe) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing segrecvframe! Probably you forgot to call VecAssemblyBegin first");
295:   VecGetArray(X,&xarray);
296:   PetscSegBufferExtractInPlace(x->segrecvframe,&frame);
297:   PetscMalloc2(4*x->nrecvranks,&some_indices,x->use_status?4*x->nrecvranks:0,&some_statuses);
298:   for (r=0,npending=0; r<x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb;
299:   while (npending>0) {
300:     PetscMPIInt ndone=0,ii;
301:     /* Filling MPI_Status fields requires some resources from the MPI library.  We skip it on the first assembly, or
302:      * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial
303:      * rendezvous.  When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that
304:      * subsequent assembly can set a proper subset of the values. */
305:     MPI_Waitsome(4*x->nrecvranks,x->recvreqs,&ndone,some_indices,x->use_status?some_statuses:MPI_STATUSES_IGNORE);
306:     for (ii=0; ii<ndone; ii++) {
307:       PetscInt i = some_indices[ii]/4,j,k;
308:       InsertMode imode = (InsertMode)x->recvhdr[i].insertmode;
309:       PetscInt *recvint;
310:       PetscScalar *recvscalar;
311:       PetscBool intmsg = (PetscBool)(some_indices[ii]%2 == 0);
312:       PetscBool blockmsg = (PetscBool)((some_indices[ii]%4)/2 == 1);
313:       npending--;
314:       if (!blockmsg) { /* Scalar stash */
315:         PetscMPIInt count;
316:         if (--frame[i].pendings > 0) continue;
317:         if (x->use_status) {
318:           MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
319:         } else count = x->recvhdr[i].count;
320:         for (j=0,recvint=frame[i].ints,recvscalar=frame[i].scalars; j<count; j++,recvint++) {
321:           PetscInt loc = *recvint - X->map->rstart;
322:           if (*recvint < X->map->rstart || X->map->rend <= *recvint) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Received vector entry %D out of local range [%D,%D)",*recvint,X->map->rstart,X->map->rend);
323:           switch (imode) {
324:           case ADD_VALUES:
325:             xarray[loc] += *recvscalar++;
326:             break;
327:           case INSERT_VALUES:
328:             xarray[loc] = *recvscalar++;
329:             break;
330:           default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
331:           }
332:         }
333:       } else {                  /* Block stash */
334:         PetscMPIInt count;
335:         if (--frame[i].pendingb > 0) continue;
336:         if (x->use_status) {
337:           MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
338:           if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */
339:         } else count = x->recvhdr[i].bcount;
340:         for (j=0,recvint=frame[i].intb,recvscalar=frame[i].scalarb; j<count; j++,recvint++) {
341:           PetscInt loc = (*recvint)*bs - X->map->rstart;
342:           switch (imode) {
343:           case ADD_VALUES:
344:             for (k=loc; k<loc+bs; k++) xarray[k] += *recvscalar++;
345:             break;
346:           case INSERT_VALUES:
347:             for (k=loc; k<loc+bs; k++) xarray[k] = *recvscalar++;
348:             break;
349:           default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
350:           }
351:         }
352:       }
353:     }
354:   }
355:   VecRestoreArray(X,&xarray);
356:   MPI_Waitall(4*x->nsendranks,x->sendreqs,MPI_STATUSES_IGNORE);
357:   PetscFree2(some_indices,some_statuses);
358:   if (x->assembly_subset) {
359:     void *dummy;                /* reset segbuffers */
360:     PetscSegBufferExtractInPlace(x->segrecvint,&dummy);
361:     PetscSegBufferExtractInPlace(x->segrecvscalar,&dummy);
362:   } else {
363:     VecAssemblyReset_MPI(X);
364:   }

366:   X->stash.insertmode = NOT_SET_VALUES;
367:   X->bstash.insertmode = NOT_SET_VALUES;
368:   VecStashScatterEnd_Private(&X->stash);
369:   VecStashScatterEnd_Private(&X->bstash);
370:   return(0);
371: }
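/*
   A minimal sketch (hypothetical application loop, not part of this file) of the caller-side
   protocol that VecAssemblyBegin_MPI_BTS()/VecAssemblyEnd_MPI_BTS() implement; the arrays
   idx/vals, the count m, and the loop bound are illustrative only.

     VecSetOption(X,VEC_SUBSET_OFF_PROC_ENTRIES,PETSC_TRUE);   -  optional: allow pattern reuse across assemblies
     for (step=0; step<nsteps; step++) {
       VecSetValues(X,m,idx,vals,ADD_VALUES);   -  off-process entries go into the stash
       VecAssemblyBegin(X);                     -  the first assembly builds the two-sided pattern; with the
                                                   option set, later assemblies reuse it
       VecAssemblyEnd(X);                       -  received entries are unpacked into the local array
     }
*/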

373: PetscErrorCode VecAssemblyReset_MPI(Vec X)
374: {
375:   Vec_MPI *x = (Vec_MPI*)X->data;

379:   PetscFree(x->sendreqs);
380:   PetscFree(x->recvreqs);
381:   PetscFree(x->sendranks);
382:   PetscFree(x->recvranks);
383:   PetscFree(x->sendhdr);
384:   PetscFree(x->recvhdr);
385:   PetscFree(x->sendptrs);
386:   PetscSegBufferDestroy(&x->segrecvint);
387:   PetscSegBufferDestroy(&x->segrecvscalar);
388:   PetscSegBufferDestroy(&x->segrecvframe);
389:   return(0);
390: }


393: static PetscErrorCode VecSetFromOptions_MPI(PetscOptionItems *PetscOptionsObject,Vec X)
394: {
395: #if !defined(PETSC_HAVE_MPIUNI)
397:   PetscBool      flg = PETSC_FALSE,set;

400:   PetscOptionsHead(PetscOptionsObject,"VecMPI Options");
401:   PetscOptionsBool("-vec_assembly_legacy","Use MPI 1 version of assembly","",flg,&flg,&set);
402:   if (set) {
403:     X->ops->assemblybegin = flg ? VecAssemblyBegin_MPI : VecAssemblyBegin_MPI_BTS;
404:     X->ops->assemblyend   = flg ? VecAssemblyEnd_MPI   : VecAssemblyEnd_MPI_BTS;
405:   }
406:   PetscOptionsTail();
407: #else
408:   X->ops->assemblybegin =  VecAssemblyBegin_MPI;
409:   X->ops->assemblyend   =  VecAssemblyEnd_MPI;
410: #endif
411:   return(0);
412: }
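/*
   A minimal sketch (hypothetical, not part of this file) of selecting the legacy assembly path
   handled by VecSetFromOptions_MPI() above, either from the command line

     ./app -vec_assembly_legacy

   or programmatically (the option name is the one registered above):

     PetscOptionsSetValue(NULL,"-vec_assembly_legacy","true");
     VecSetFromOptions(X);        -  switches X to VecAssemblyBegin_MPI()/VecAssemblyEnd_MPI()
*/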


415: static struct _VecOps DvOps = { VecDuplicate_MPI, /* 1 */
416:                                 VecDuplicateVecs_Default,
417:                                 VecDestroyVecs_Default,
418:                                 VecDot_MPI,
419:                                 VecMDot_MPI,
420:                                 VecNorm_MPI,
421:                                 VecTDot_MPI,
422:                                 VecMTDot_MPI,
423:                                 VecScale_Seq,
424:                                 VecCopy_Seq, /* 10 */
425:                                 VecSet_Seq,
426:                                 VecSwap_Seq,
427:                                 VecAXPY_Seq,
428:                                 VecAXPBY_Seq,
429:                                 VecMAXPY_Seq,
430:                                 VecAYPX_Seq,
431:                                 VecWAXPY_Seq,
432:                                 VecAXPBYPCZ_Seq,
433:                                 VecPointwiseMult_Seq,
434:                                 VecPointwiseDivide_Seq,
435:                                 VecSetValues_MPI, /* 20 */
436:                                 VecAssemblyBegin_MPI_BTS,
437:                                 VecAssemblyEnd_MPI_BTS,
438:                                 NULL,
439:                                 VecGetSize_MPI,
440:                                 VecGetSize_Seq,
441:                                 NULL,
442:                                 VecMax_MPI,
443:                                 VecMin_MPI,
444:                                 VecSetRandom_Seq,
445:                                 VecSetOption_MPI,
446:                                 VecSetValuesBlocked_MPI,
447:                                 VecDestroy_MPI,
448:                                 VecView_MPI,
449:                                 VecPlaceArray_MPI,
450:                                 VecReplaceArray_Seq,
451:                                 VecDot_Seq,
452:                                 VecTDot_Seq,
453:                                 VecNorm_Seq,
454:                                 VecMDot_Seq,
455:                                 VecMTDot_Seq,
456:                                 VecLoad_Default,
457:                                 VecReciprocal_Default,
458:                                 VecConjugate_Seq,
459:                                 NULL,
460:                                 NULL,
461:                                 VecResetArray_MPI,
462:                                 VecSetFromOptions_MPI,/*set from options */
463:                                 VecMaxPointwiseDivide_Seq,
464:                                 VecPointwiseMax_Seq,
465:                                 VecPointwiseMaxAbs_Seq,
466:                                 VecPointwiseMin_Seq,
467:                                 VecGetValues_MPI,
468:                                 NULL,
469:                                 NULL,
470:                                 NULL,
471:                                 NULL,
472:                                 NULL,
473:                                 NULL,
474:                                 VecStrideGather_Default,
475:                                 VecStrideScatter_Default,
476:                                 NULL,
477:                                 NULL,
478:                                 NULL,
479:                                 NULL,
480:                                 NULL,
481:                                 VecStrideSubSetGather_Default,
482:                                 VecStrideSubSetScatter_Default,
483:                                 NULL,
484:                                 NULL
485: };

487: /*
488:     VecCreate_MPI_Private - Basic create routine called by VecCreate_MPI() (i.e. VecCreateMPI()),
489:     VecCreateMPIWithArray(), VecCreate_Shared() (i.e. VecCreateShared()), VecCreateGhost(),
490:     VecDuplicate_MPI(), VecCreateGhostWithArray(), and VecDuplicate_Shared()

492:     If alloc is true and array is NULL then this routine allocates the space, otherwise
493:     no space is allocated.
494: */
495: PetscErrorCode VecCreate_MPI_Private(Vec v,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
496: {
497:   Vec_MPI        *s;

501:   PetscNewLog(v,&s);
502:   v->data        = (void*)s;
503:   PetscMemcpy(v->ops,&DvOps,sizeof(DvOps));
504:   s->nghost      = nghost;
505:   v->petscnative = PETSC_TRUE;
506:   if (array) v->offloadmask = PETSC_OFFLOAD_CPU;

508:   PetscLayoutSetUp(v->map);

510:   s->array           = (PetscScalar*)array;
511:   s->array_allocated = NULL;
512:   if (alloc && !array) {
513:     PetscInt n = v->map->n+nghost;
514:     PetscCalloc1(n,&s->array);
515:     PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));
516:     s->array_allocated = s->array;
517:   }

519:   /* By default parallel vectors do not have local representation */
520:   s->localrep    = NULL;
521:   s->localupdate = NULL;

523:   v->stash.insertmode = NOT_SET_VALUES;
524:   v->bstash.insertmode = NOT_SET_VALUES;
525:   /* create the stashes. The block-size for bstash is set later when
526:      VecSetValuesBlocked is called.
527:   */
528:   VecStashCreate_Private(PetscObjectComm((PetscObject)v),1,&v->stash);
529:   VecStashCreate_Private(PetscObjectComm((PetscObject)v),PetscAbs(v->map->bs),&v->bstash);

531: #if defined(PETSC_HAVE_MATLAB_ENGINE)
532:   PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEnginePut_C",VecMatlabEnginePut_Default);
533:   PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEngineGet_C",VecMatlabEngineGet_Default);
534: #endif
535:   PetscObjectChangeTypeName((PetscObject)v,VECMPI);
536:   return(0);
537: }

539: /*MC
540:    VECMPI - VECMPI = "mpi" - The basic parallel vector

542:    Options Database Keys:
543: . -vec_type mpi - sets the vector type to VECMPI during a call to VecSetFromOptions()

545:   Level: beginner

547: .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VecType, VecCreateMPI()
548: M*/

550: PetscErrorCode VecCreate_MPI(Vec vv)
551: {

555:   VecCreate_MPI_Private(vv,PETSC_TRUE,0,NULL);
556:   return(0);
557: }
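/*
   A minimal sketch (hypothetical caller code, not part of this file) of creating a VECMPI through
   the public interface; the global size N is illustrative only.

     Vec v;
     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,PETSC_DECIDE,N);
     VecSetType(v,VECMPI);          -  dispatches to VecCreate_MPI() above
     VecSet(v,0.0);
     VecDestroy(&v);
*/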

559: /*MC
560:    VECSTANDARD = "standard" - A VECSEQ on one process and VECMPI on more than one process

562:    Options Database Keys:
563: . -vec_type standard - sets the vector type to VECSTANDARD during a call to VecSetFromOptions()

565:   Level: beginner

567: .seealso: VecCreateSeq(), VecCreateMPI()
568: M*/

570: PETSC_EXTERN PetscErrorCode VecCreate_Standard(Vec v)
571: {
573:   PetscMPIInt    size;

576:   MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);
577:   if (size == 1) {
578:     VecSetType(v,VECSEQ);
579:   } else {
580:     VecSetType(v,VECMPI);
581:   }
582:   return(0);
583: }
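/*
   A minimal sketch (hypothetical caller code, not part of this file) of letting VECSTANDARD pick
   the implementation: VECSEQ on one rank, the VECMPI code in this file otherwise.

     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,n,PETSC_DECIDE);
     VecSetType(v,VECSTANDARD);     -  or use VecSetFromOptions(v) together with -vec_type standard
*/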

585: /*@C
586:    VecCreateMPIWithArray - Creates a parallel, array-style vector,
587:    where the user provides the array space to store the vector values.

589:    Collective

591:    Input Parameters:
592: +  comm  - the MPI communicator to use
593: .  bs    - block size, same meaning as VecSetBlockSize()
594: .  n     - local vector length, cannot be PETSC_DECIDE
595: .  N     - global vector length (or PETSC_DECIDE to have it calculated)
596: -  array - the user provided array to store the vector values

598:    Output Parameter:
599: .  vv - the vector

601:    Notes:
602:    Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
603:    same type as an existing vector.

605:    If the user-provided array is NULL, then VecPlaceArray() can be used
606:    at a later stage to SET the array for storing the vector values.

608:    PETSc does NOT free the array when the vector is destroyed via VecDestroy().
609:    The user should not free the array until the vector is destroyed.

611:    Level: intermediate

613: .seealso: VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
614:           VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()

616: @*/
617: PetscErrorCode  VecCreateMPIWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
618: {

622:   if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
623:   PetscSplitOwnership(comm,&n,&N);
624:   VecCreate(comm,vv);
625:   VecSetSizes(*vv,n,N);
626:   VecSetBlockSize(*vv,bs);
627:   VecCreate_MPI_Private(*vv,PETSC_FALSE,0,array);
628:   return(0);
629: }
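/*
   A minimal usage sketch (hypothetical caller code, not part of this file) for
   VecCreateMPIWithArray(); the local size n and the array vals are illustrative only. As noted
   above, VecDestroy() does not free the user-provided array.

     PetscScalar *vals;
     PetscMalloc1(n,&vals);
     VecCreateMPIWithArray(PETSC_COMM_WORLD,1,n,PETSC_DECIDE,vals,&v);
     ...
     VecDestroy(&v);
     PetscFree(vals);               -  the caller owns the array
*/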

631: /*@C
632:    VecCreateGhostWithArray - Creates a parallel vector with ghost padding on each processor;
633:    the caller allocates the array space.

635:    Collective

637:    Input Parameters:
638: +  comm - the MPI communicator to use
639: .  n - local vector length
640: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
641: .  nghost - number of local ghost points
642: .  ghosts - global indices of ghost points (or NULL if not needed), these do not need to be in increasing order (sorted)
643: -  array - the space to store the vector values (must be of length at least n + nghost)

645:    Output Parameter:
646: .  vv - the global vector representation (without ghost points as part of vector)

648:    Notes:
649:    Use VecGhostGetLocalForm() to access the local, ghosted representation
650:    of the vector.

652:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

654:    Level: advanced


657: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
658:           VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
659:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

661: @*/
662: PetscErrorCode  VecCreateGhostWithArray(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
663: {
664:   PetscErrorCode         ierr;
665:   Vec_MPI                *w;
666:   PetscScalar            *larray;
667:   IS                     from,to;
668:   ISLocalToGlobalMapping ltog;
669:   PetscInt               rstart,i,*indices;

672:   *vv = NULL;

674:   if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
675:   if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
676:   if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
677:   PetscSplitOwnership(comm,&n,&N);
678:   /* Create global representation */
679:   VecCreate(comm,vv);
680:   VecSetSizes(*vv,n,N);
681:   VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost,array);
682:   w    = (Vec_MPI*)(*vv)->data;
683:   /* Create local representation */
684:   VecGetArray(*vv,&larray);
685:   VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
686:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
687:   VecRestoreArray(*vv,&larray);

689:   /*
690:        Create scatter context for scattering (updating) ghost values
691:   */
692:   ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
693:   ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
694:   VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
695:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
696:   ISDestroy(&to);
697:   ISDestroy(&from);

699:   /* set local to global mapping for ghosted vector */
700:   PetscMalloc1(n+nghost,&indices);
701:   VecGetOwnershipRange(*vv,&rstart,NULL);
702:   for (i=0; i<n; i++) {
703:     indices[i] = rstart + i;
704:   }
705:   for (i=0; i<nghost; i++) {
706:     indices[n+i] = ghosts[i];
707:   }
708:   ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
709:   VecSetLocalToGlobalMapping(*vv,ltog);
710:   ISLocalToGlobalMappingDestroy(&ltog);
711:   return(0);
712: }
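/*
   A minimal usage sketch (hypothetical caller code, not part of this file) for
   VecCreateGhostWithArray(); the sizes and the ghost index list are illustrative only. The
   caller-provided array holds the n owned entries followed by the nghost ghost entries.

     PetscScalar *space;
     Vec         v,vlocal;
     PetscMalloc1(n+nghost,&space);
     VecCreateGhostWithArray(PETSC_COMM_WORLD,n,PETSC_DECIDE,nghost,ghosts,space,&v);
     VecGhostGetLocalForm(v,&vlocal);       -  sequential view of length n+nghost backed by space
     VecGhostRestoreLocalForm(v,&vlocal);
*/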

714: /*@
715:    VecCreateGhost - Creates a parallel vector with ghost padding on each processor.

717:    Collective

719:    Input Parameters:
720: +  comm - the MPI communicator to use
721: .  n - local vector length
722: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
723: .  nghost - number of local ghost points
724: -  ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)

726:    Output Parameter:
727: .  vv - the global vector representation (without ghost points as part of vector)

729:    Notes:
730:    Use VecGhostGetLocalForm() to access the local, ghosted representation
731:    of the vector.

733:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

735:    Level: advanced

737: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
738:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
739:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
740:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

742: @*/
743: PetscErrorCode  VecCreateGhost(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
744: {

748:   VecCreateGhostWithArray(comm,n,N,nghost,ghosts,NULL,vv);
749:   return(0);
750: }
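/*
   A minimal usage sketch (hypothetical caller code, not part of this file) of the usual
   assemble-then-update cycle for a ghosted vector; sizes and index arrays are illustrative only.

     VecCreateGhost(PETSC_COMM_WORLD,n,PETSC_DECIDE,nghost,ghosts,&v);
     VecSetValues(v,m,idx,vals,ADD_VALUES);
     VecAssemblyBegin(v);
     VecAssemblyEnd(v);
     VecGhostUpdateBegin(v,INSERT_VALUES,SCATTER_FORWARD);   -  copy owned values into the ghost slots
     VecGhostUpdateEnd(v,INSERT_VALUES,SCATTER_FORWARD);
*/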

752: /*@
753:    VecMPISetGhost - Sets the ghost points for an MPI ghost vector

755:    Collective on Vec

757:    Input Parameters:
758: +  vv - the MPI vector
759: .  nghost - number of local ghost points
760: -  ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)


763:    Notes:
764:    Use VecGhostGetLocalForm() to access the local, ghosted representation
765:    of the vector.

767:    This also automatically sets the ISLocalToGlobalMapping() for this vector.

769:    You must call this AFTER you have set the type of the vector (with VecSetType()) and the size (with VecSetSizes()).

771:    Level: advanced

773: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
774:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
775:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
776:           VecCreateGhostBlock(), VecCreateGhostBlockWithArray()

778: @*/
779: PetscErrorCode  VecMPISetGhost(Vec vv,PetscInt nghost,const PetscInt ghosts[])
780: {
782:   PetscBool      flg;

785:   PetscObjectTypeCompare((PetscObject)vv,VECMPI,&flg);
786:   /* if this is already a fully built VECMPI then destroy it and rebuild it with ghosting */
787:   if (flg) {
788:     PetscInt               n,N;
789:     Vec_MPI                *w;
790:     PetscScalar            *larray;
791:     IS                     from,to;
792:     ISLocalToGlobalMapping ltog;
793:     PetscInt               rstart,i,*indices;
794:     MPI_Comm               comm;

796:     PetscObjectGetComm((PetscObject)vv,&comm);
797:     n    = vv->map->n;
798:     N    = vv->map->N;
799:     (*vv->ops->destroy)(vv);
800:     VecSetSizes(vv,n,N);
801:     VecCreate_MPI_Private(vv,PETSC_TRUE,nghost,NULL);
802:     w    = (Vec_MPI*)(vv)->data;
803:     /* Create local representation */
804:     VecGetArray(vv,&larray);
805:     VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
806:     PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localrep);
807:     VecRestoreArray(vv,&larray);

809:     /*
810:      Create scatter context for scattering (updating) ghost values
811:      */
812:     ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
813:     ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
814:     VecScatterCreate(vv,from,w->localrep,to,&w->localupdate);
815:     PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localupdate);
816:     ISDestroy(&to);
817:     ISDestroy(&from);

819:     /* set local to global mapping for ghosted vector */
820:     PetscMalloc1(n+nghost,&indices);
821:     VecGetOwnershipRange(vv,&rstart,NULL);

823:     for (i=0; i<n; i++)      indices[i]   = rstart + i;
824:     for (i=0; i<nghost; i++) indices[n+i] = ghosts[i];

826:     ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
827:     VecSetLocalToGlobalMapping(vv,ltog);
828:     ISLocalToGlobalMappingDestroy(&ltog);
829:   } else if (vv->ops->create == VecCreate_MPI) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set local or global size before setting ghosting");
830:   else if (!((PetscObject)vv)->type_name) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set type to VECMPI before ghosting");
831:   return(0);
832: }
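/*
   A minimal sketch (hypothetical caller code, not part of this file) of adding ghosting to an
   already-typed VECMPI with VecMPISetGhost(); sizes and indices are illustrative only.

     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,n,PETSC_DECIDE);
     VecSetType(v,VECMPI);                -  the type and sizes must be set first
     VecMPISetGhost(v,nghost,ghosts);     -  rebuilds v with a ghosted local representation
*/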


835: /* ------------------------------------------------------------------------------------------*/
836: /*@C
837:    VecCreateGhostBlockWithArray - Creates a parallel vector with ghost padding on each processor;
838:    the caller allocates the array space. Indices in the ghost region are based on blocks.

840:    Collective

842:    Input Parameters:
843: +  comm - the MPI communicator to use
844: .  bs - block size
845: .  n - local vector length
846: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
847: .  nghost - number of local ghost blocks
848: .  ghosts - global indices of ghost blocks (or NULL if not needed), counts are by block not by index, these do not need to be in increasing order (sorted)
849: -  array - the space to store the vector values (must be of length at least n + nghost*bs)

851:    Output Parameter:
852: .  vv - the global vector representation (without ghost points as part of vector)

854:    Notes:
855:    Use VecGhostGetLocalForm() to access the local, ghosted representation
856:    of the vector.

858:    n is the local vector size (the total local size, not the number of blocks) while nghost
859:    is the number of blocks in the ghost portion, i.e., the number of elements in the ghost
860:    portion is bs*nghost

862:    Level: advanced


865: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
866:           VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
867:           VecCreateGhostWithArray(), VecCreateGhostBlock()

869: @*/
870: PetscErrorCode  VecCreateGhostBlockWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
871: {
872:   PetscErrorCode         ierr;
873:   Vec_MPI                *w;
874:   PetscScalar            *larray;
875:   IS                     from,to;
876:   ISLocalToGlobalMapping ltog;
877:   PetscInt               rstart,i,nb,*indices;

880:   *vv = NULL;

882:   if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
883:   if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
884:   if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
885:   if (n % bs)                 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Local size must be a multiple of block size");
886:   PetscSplitOwnership(comm,&n,&N);
887:   /* Create global representation */
888:   VecCreate(comm,vv);
889:   VecSetSizes(*vv,n,N);
890:   VecSetBlockSize(*vv,bs);
891:   VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost*bs,array);
892:   w    = (Vec_MPI*)(*vv)->data;
893:   /* Create local representation */
894:   VecGetArray(*vv,&larray);
895:   VecCreateSeqWithArray(PETSC_COMM_SELF,bs,n+bs*nghost,larray,&w->localrep);
896:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
897:   VecRestoreArray(*vv,&larray);

899:   /*
900:        Create scatter context for scattering (updating) ghost values
901:   */
902:   ISCreateBlock(comm,bs,nghost,ghosts,PETSC_COPY_VALUES,&from);
903:   ISCreateStride(PETSC_COMM_SELF,bs*nghost,n,1,&to);
904:   VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
905:   PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
906:   ISDestroy(&to);
907:   ISDestroy(&from);

909:   /* set local to global mapping for ghosted vector */
910:   nb     = n/bs;
911:   PetscMalloc1(nb+nghost,&indices);
912:   VecGetOwnershipRange(*vv,&rstart,NULL);
913:   rstart = rstart/bs;

915:   for (i=0; i<nb; i++)      indices[i]    = rstart + i;
916:   for (i=0; i<nghost; i++)  indices[nb+i] = ghosts[i];

918:   ISLocalToGlobalMappingCreate(comm,bs,nb+nghost,indices,PETSC_OWN_POINTER,&ltog);
919:   VecSetLocalToGlobalMapping(*vv,ltog);
920:   ISLocalToGlobalMappingDestroy(&ltog);
921:   return(0);
922: }
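/*
   A minimal usage sketch (hypothetical caller code, not part of this file) for
   VecCreateGhostBlockWithArray(); bs, n and the block ghost indices are illustrative only. The
   caller-provided array must have length n + bs*nghost, and n must be divisible by bs.

     PetscScalar *space;
     PetscMalloc1(n+bs*nghost,&space);
     VecCreateGhostBlockWithArray(PETSC_COMM_WORLD,bs,n,PETSC_DECIDE,nghost,blockghosts,space,&v);
*/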

924: /*@
925:    VecCreateGhostBlock - Creates a parallel vector with ghost padding on each processor.
926:         The indexing of the ghost points is done with blocks.

928:    Collective

930:    Input Parameters:
931: +  comm - the MPI communicator to use
932: .  bs - the block size
933: .  n - local vector length
934: .  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
935: .  nghost - number of local ghost blocks
936: -  ghosts - global indices of ghost blocks, counts are by block, not by individual index, these do not need to be in increasing order (sorted)

938:    Output Parameter:
939: .  vv - the global vector representation (without ghost points as part of vector)

941:    Notes:
942:    Use VecGhostGetLocalForm() to access the local, ghosted representation
943:    of the vector.

945:    n is the local vector size (the total local size, not the number of blocks) while nghost
946:    is the number of blocks in the ghost portion, i.e., the number of elements in the ghost
947:    portion is bs*nghost

949:    Level: advanced

951: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
952:           VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
953:           VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecCreateGhostBlockWithArray()

955: @*/
956: PetscErrorCode  VecCreateGhostBlock(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
957: {

961:   VecCreateGhostBlockWithArray(comm,bs,n,N,nghost,ghosts,NULL,vv);
962:   return(0);
963: }
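/*
   A minimal usage sketch (hypothetical caller code, not part of this file) for
   VecCreateGhostBlock(), setting values by block and refreshing the block ghosts; bs, n and the
   indices are illustrative only.

     VecCreateGhostBlock(PETSC_COMM_WORLD,bs,n,PETSC_DECIDE,nghost,blockghosts,&v);
     VecSetValuesBlocked(v,1,&blockrow,blockvals,INSERT_VALUES);   -  blockvals holds bs entries
     VecAssemblyBegin(v);
     VecAssemblyEnd(v);
     VecGhostUpdateBegin(v,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostUpdateEnd(v,INSERT_VALUES,SCATTER_FORWARD);
*/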