Actual source code: mpiviennacl.cxx  (petsc-3.14.6, 2021-03-30)

/*
   This file contains routines for parallel vector operations.
 */
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h>
#include <../src/vec/vec/impls/seq/seqviennacl/viennaclvecimpl.h>

/*MC
   VECVIENNACL - VECVIENNACL = "viennacl" - A VECSEQVIENNACL on a single-process communicator, and VECMPIVIENNACL otherwise.

   Options Database Keys:
. -vec_type viennacl - sets the vector type to VECVIENNACL during a call to VecSetFromOptions()

   Level: beginner

.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQVIENNACL, VECMPIVIENNACL, VECSTANDARD, VecType, VecCreateMPI()
M*/

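/*
   Illustrative usage sketch (not part of the original source): requesting the
   ViennaCL vector type from application code, either by calling VecSetType()
   directly or by running with -vec_type viennacl and calling VecSetFromOptions().
   Error checking is omitted for brevity.

      Vec x;
      VecCreate(PETSC_COMM_WORLD,&x);
      VecSetSizes(x,PETSC_DECIDE,100);
      VecSetType(x,VECVIENNACL);
      VecSet(x,1.0);
      VecDestroy(&x);
*/
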
PetscErrorCode VecDestroy_MPIViennaCL(Vec v)
{
  try {
    if (v->spptr) {
      delete ((Vec_ViennaCL*)v->spptr)->GPUarray_allocated;
      delete (Vec_ViennaCL*)v->spptr;
    }
  } catch(std::exception const & ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ViennaCL error: %s", ex.what());
  }
  VecDestroy_MPI(v);
  return(0);
}

PetscErrorCode VecNorm_MPIViennaCL(Vec xin,NormType type,PetscReal *z)
{
  PetscReal sum,work = 0.0;

  if (type == NORM_2 || type == NORM_FROBENIUS) {
    VecNorm_SeqViennaCL(xin,NORM_2,&work);
    work *= work;
    MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));
    *z = PetscSqrtReal(sum);
  } else if (type == NORM_1) {
    /* Find the local part */
    VecNorm_SeqViennaCL(xin,NORM_1,&work);
    /* Find the global sum */
    MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));
  } else if (type == NORM_INFINITY) {
    /* Find the local max */
    VecNorm_SeqViennaCL(xin,NORM_INFINITY,&work);
    /* Find the global max */
    MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));
  } else if (type == NORM_1_AND_2) {
    PetscReal temp[2];
    VecNorm_SeqViennaCL(xin,NORM_1,temp);
    VecNorm_SeqViennaCL(xin,NORM_2,temp+1);
    temp[1] = temp[1]*temp[1];
    MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));
    z[1] = PetscSqrtReal(z[1]);
  }
  return(0);
}

PetscErrorCode VecDot_MPIViennaCL(Vec xin,Vec yin,PetscScalar *z)
{
  PetscScalar sum,work;

  VecDot_SeqViennaCL(xin,yin,&work);
  MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
  *z = sum;
  return(0);
}

PetscErrorCode VecTDot_MPIViennaCL(Vec xin,Vec yin,PetscScalar *z)
{
  PetscScalar sum,work;

  VecTDot_SeqViennaCL(xin,yin,&work);
  MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
  *z = sum;
  return(0);
}

PetscErrorCode VecMDot_MPIViennaCL(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
  PetscScalar awork[128],*work = awork;

  if (nv > 128) {
    PetscMalloc1(nv,&work);
  }
  VecMDot_SeqViennaCL(xin,nv,y,work);
  MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
  if (nv > 128) {
    PetscFree(work);
  }
  return(0);
}

/*MC
   VECMPIVIENNACL - VECMPIVIENNACL = "mpiviennacl" - The basic parallel vector, modified to use ViennaCL

   Options Database Keys:
. -vec_type mpiviennacl - sets the vector type to VECMPIVIENNACL during a call to VecSetFromOptions()

   Level: beginner

.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/

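/*
   Illustrative sketch (not part of the original source): VECMPIVIENNACL is the
   type that the generic VECVIENNACL resolves to on a communicator with more
   than one rank, and it can also be requested explicitly.  Error checking is
   omitted for brevity.

      Vec x;
      VecCreate(PETSC_COMM_WORLD,&x);
      VecSetSizes(x,PETSC_DECIDE,100);
      VecSetType(x,VECMPIVIENNACL);
      VecDestroy(&x);
*/
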
PetscErrorCode VecDuplicate_MPIViennaCL(Vec win,Vec *v)
{
  Vec_MPI     *vw,*w = (Vec_MPI*)win->data;
  PetscScalar *array;

  VecCreate(PetscObjectComm((PetscObject)win),v);
  PetscLayoutReference(win->map,&(*v)->map);

  VecCreate_MPI_Private(*v,PETSC_FALSE,w->nghost,0);
  vw = (Vec_MPI*)(*v)->data;
  PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));

  /* save local representation of the parallel vector (and scatter) if it exists */
  if (w->localrep) {
    VecGetArray(*v,&array);
    VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);
    PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));
    VecRestoreArray(*v,&array);
    PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);
    vw->localupdate = w->localupdate;
    if (vw->localupdate) {
      PetscObjectReference((PetscObject)vw->localupdate);
    }
  }

  /* New vector should inherit stashing property of parent */
  (*v)->stash.donotstash   = win->stash.donotstash;
  (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

  /* change type_name appropriately */
  PetscObjectChangeTypeName((PetscObject)(*v),VECMPIVIENNACL);

  PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);
  PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);
  (*v)->map->bs   = PetscAbs(win->map->bs);
  (*v)->bstash.bs = win->bstash.bs;
  return(0);
}

PetscErrorCode VecDotNorm2_MPIViennaCL(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
  PetscScalar work[2],sum[2];

  VecDotNorm2_SeqViennaCL(s,t,work,work+1);
  MPIU_Allreduce((void*)&work,(void*)&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));
  *dp = sum[0];
  *nm = sum[1];
  return(0);
}

PetscErrorCode VecBindToCPU_MPIViennaCL(Vec vv, PetscBool pin)
{
  vv->boundtocpu = pin;

  if (pin) {
    VecViennaCLCopyFromGPU(vv);
    vv->offloadmask                 = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
    vv->ops->dotnorm2               = NULL;
    vv->ops->waxpy                  = VecWAXPY_Seq;
    vv->ops->dot                    = VecDot_MPI;
    vv->ops->mdot                   = VecMDot_MPI;
    vv->ops->tdot                   = VecTDot_MPI;
    vv->ops->norm                   = VecNorm_MPI;
    vv->ops->scale                  = VecScale_Seq;
    vv->ops->copy                   = VecCopy_Seq;
    vv->ops->set                    = VecSet_Seq;
    vv->ops->swap                   = VecSwap_Seq;
    vv->ops->axpy                   = VecAXPY_Seq;
    vv->ops->axpby                  = VecAXPBY_Seq;
    vv->ops->maxpy                  = VecMAXPY_Seq;
    vv->ops->aypx                   = VecAYPX_Seq;
    vv->ops->axpbypcz               = VecAXPBYPCZ_Seq;
    vv->ops->pointwisemult          = VecPointwiseMult_Seq;
    vv->ops->setrandom              = VecSetRandom_Seq;
    vv->ops->placearray             = VecPlaceArray_Seq;
    vv->ops->replacearray           = VecReplaceArray_Seq;
    vv->ops->resetarray             = VecResetArray_Seq;
    vv->ops->dot_local              = VecDot_Seq;
    vv->ops->tdot_local             = VecTDot_Seq;
    vv->ops->norm_local             = VecNorm_Seq;
    vv->ops->mdot_local             = VecMDot_Seq;
    vv->ops->pointwisedivide        = VecPointwiseDivide_Seq;
    vv->ops->getlocalvector         = NULL;
    vv->ops->restorelocalvector     = NULL;
    vv->ops->getlocalvectorread     = NULL;
    vv->ops->restorelocalvectorread = NULL;
    vv->ops->getarraywrite          = NULL;
  } else {
    vv->ops->dotnorm2        = VecDotNorm2_MPIViennaCL;
    vv->ops->waxpy           = VecWAXPY_SeqViennaCL;
    vv->ops->duplicate       = VecDuplicate_MPIViennaCL;
    vv->ops->dot             = VecDot_MPIViennaCL;
    vv->ops->mdot            = VecMDot_MPIViennaCL;
    vv->ops->tdot            = VecTDot_MPIViennaCL;
    vv->ops->norm            = VecNorm_MPIViennaCL;
    vv->ops->scale           = VecScale_SeqViennaCL;
    vv->ops->copy            = VecCopy_SeqViennaCL;
    vv->ops->set             = VecSet_SeqViennaCL;
    vv->ops->swap            = VecSwap_SeqViennaCL;
    vv->ops->axpy            = VecAXPY_SeqViennaCL;
    vv->ops->axpby           = VecAXPBY_SeqViennaCL;
    vv->ops->maxpy           = VecMAXPY_SeqViennaCL;
    vv->ops->aypx            = VecAYPX_SeqViennaCL;
    vv->ops->axpbypcz        = VecAXPBYPCZ_SeqViennaCL;
    vv->ops->pointwisemult   = VecPointwiseMult_SeqViennaCL;
    vv->ops->setrandom       = VecSetRandom_SeqViennaCL;
    vv->ops->dot_local       = VecDot_SeqViennaCL;
    vv->ops->tdot_local      = VecTDot_SeqViennaCL;
    vv->ops->norm_local      = VecNorm_SeqViennaCL;
    vv->ops->mdot_local      = VecMDot_SeqViennaCL;
    vv->ops->destroy         = VecDestroy_MPIViennaCL;
    vv->ops->pointwisedivide = VecPointwiseDivide_SeqViennaCL;
    vv->ops->placearray      = VecPlaceArray_SeqViennaCL;
    vv->ops->replacearray    = VecReplaceArray_SeqViennaCL;
    vv->ops->resetarray      = VecResetArray_SeqViennaCL;
    /*
       get values?
    */
  }
  return(0);
}

PETSC_EXTERN PetscErrorCode VecCreate_MPIViennaCL(Vec vv)
{
  PetscLayoutSetUp(vv->map);
  VecViennaCLAllocateCheck(vv);
  VecCreate_MPIViennaCL_Private(vv,PETSC_FALSE,0,((Vec_ViennaCL*)(vv->spptr))->GPUarray);
  VecViennaCLAllocateCheckHost(vv);
  VecSet(vv,0.0);
  VecSet_Seq(vv,0.0);
  vv->offloadmask = PETSC_OFFLOAD_BOTH;
  return(0);
}

PETSC_EXTERN PetscErrorCode VecCreate_ViennaCL(Vec v)
{
  PetscMPIInt size;

  MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);
  if (size == 1) {
    VecSetType(v,VECSEQVIENNACL);
  } else {
    VecSetType(v,VECMPIVIENNACL);
  }
  return(0);
}

/*@C
   VecCreateMPIViennaCLWithArray - Creates a parallel vector, where the user
   provides the ViennaCL vector in which the vector values are stored.

   Collective

   Input Parameters:
+  comm - the MPI communicator to use
.  bs - block size, same meaning as VecSetBlockSize()
.  n - local vector length, cannot be PETSC_DECIDE
.  N - global vector length (or PETSC_DECIDE to have it calculated)
-  array - the user-provided GPU (ViennaCL) array to store the vector values

   Output Parameter:
.  vv - the vector

   Notes:
   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector; see also the usage sketch following this
   function.

   If the user-provided array is NULL, then VecViennaCLPlaceArray() can be used
   at a later stage to set the array for storing the vector values.

   PETSc does NOT free the array when the vector is destroyed via VecDestroy().
   The user should not free the array until the vector is destroyed.

   Level: intermediate

.seealso: VecCreateSeqViennaCLWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
          VecCreate(), VecCreateMPI(), VecCreateGhostWithArray(), VecViennaCLPlaceArray()
@*/
PetscErrorCode VecCreateMPIViennaCLWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const ViennaCLVector *array,Vec *vv)
{
  if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
  PetscSplitOwnership(comm,&n,&N);
  VecCreate(comm,vv);
  VecSetSizes(*vv,n,N);
  VecSetBlockSize(*vv,bs);
  VecCreate_MPIViennaCL_Private(*vv,PETSC_FALSE,0,array);
  return(0);
}

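/*
   Illustrative usage sketch (not part of the original source), assuming
   ViennaCLVector is the viennacl::vector<PetscScalar> typedef from
   viennaclvecimpl.h: wrapping an existing ViennaCL device vector of local
   length nlocal on each rank.  The names gpuvec and nlocal are hypothetical
   and error checking is omitted.

      viennacl::vector<PetscScalar> *gpuvec = new viennacl::vector<PetscScalar>(nlocal);
      Vec                            x;
      VecCreateMPIViennaCLWithArray(PETSC_COMM_WORLD,1,nlocal,PETSC_DECIDE,gpuvec,&x);
      ... use x ...
      VecDestroy(&x);
      delete gpuvec;    (PETSc does not free the user-provided ViennaCL vector)
*/
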
/*@C
   VecCreateMPIViennaCLWithArrays - Creates a parallel vector, where the user
   provides the CPU array and/or the ViennaCL vector in which the vector values
   are stored.

   Collective

   Input Parameters:
+  comm - the MPI communicator to use
.  bs - block size, same meaning as VecSetBlockSize()
.  n - local vector length, cannot be PETSC_DECIDE
.  N - global vector length (or PETSC_DECIDE to have it calculated)
.  cpuarray - the user-provided CPU array to store the vector values
-  viennaclvec - ViennaCL vector where the Vec entries are to be stored on the device

   Output Parameter:
.  vv - the vector

   Notes:
   If both cpuarray and viennaclvec are provided, the caller must ensure that
   the two hold identical values.

   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector; a usage sketch follows this function.

   PETSc does NOT free the provided arrays when the vector is destroyed via
   VecDestroy(). The user should not free the arrays until the vector is
   destroyed.

   Level: intermediate

.seealso: VecCreateSeqViennaCLWithArrays(), VecCreateMPIWithArray(),
          VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
          VecCreateMPI(), VecCreateGhostWithArray(), VecViennaCLPlaceArray(),
          VecPlaceArray(), VecCreateMPICUDAWithArrays(),
          VecViennaCLAllocateCheckHost()
@*/
PetscErrorCode VecCreateMPIViennaCLWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const ViennaCLVector *viennaclvec,Vec *vv)
{
  VecCreateMPIViennaCLWithArray(comm,bs,n,N,viennaclvec,vv);

  if (cpuarray && viennaclvec) {
    Vec_MPI *s         = (Vec_MPI*)((*vv)->data);
    s->array           = (PetscScalar*)cpuarray;
    (*vv)->offloadmask = PETSC_OFFLOAD_BOTH;
  } else if (cpuarray) {
    Vec_MPI *s         = (Vec_MPI*)((*vv)->data);
    s->array           = (PetscScalar*)cpuarray;
    (*vv)->offloadmask = PETSC_OFFLOAD_CPU;
  } else if (viennaclvec) {
    (*vv)->offloadmask = PETSC_OFFLOAD_GPU;
  } else {
    (*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
  }

  return(0);
}

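/*
   Illustrative usage sketch (not part of the original source): creating a
   vector backed by a caller-owned host array and a matching ViennaCL device
   vector.  The two must hold identical values before the Vec is used, as
   noted in the manual page above.  The names host, device, and nlocal are
   hypothetical and error checking is omitted.

      PetscScalar                   *host;
      viennacl::vector<PetscScalar> *device;
      Vec                            x;
      PetscMalloc1(nlocal,&host);
      device = new viennacl::vector<PetscScalar>(nlocal);
      ... fill host and *device with the same values ...
      VecCreateMPIViennaCLWithArrays(PETSC_COMM_WORLD,1,nlocal,PETSC_DECIDE,host,device,&x);
*/
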
PetscErrorCode VecCreate_MPIViennaCL_Private(Vec vv,PetscBool alloc,PetscInt nghost,const ViennaCLVector *array)
{
  Vec_ViennaCL *vecviennacl;

  VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);
  PetscObjectChangeTypeName((PetscObject)vv,VECMPIVIENNACL);

  VecBindToCPU_MPIViennaCL(vv,PETSC_FALSE);
  vv->ops->bindtocpu = VecBindToCPU_MPIViennaCL;

  if (alloc && !array) {
    VecViennaCLAllocateCheck(vv);
    VecViennaCLAllocateCheckHost(vv);
    VecSet(vv,0.0);
    VecSet_Seq(vv,0.0);
    vv->offloadmask = PETSC_OFFLOAD_BOTH;
  }
  if (array) {
    if (!vv->spptr) vv->spptr = new Vec_ViennaCL;
    vecviennacl                     = (Vec_ViennaCL*)vv->spptr;
    vecviennacl->GPUarray_allocated = 0;
    vecviennacl->GPUarray           = (ViennaCLVector*)array;
    vv->offloadmask                 = PETSC_OFFLOAD_UNALLOCATED;
  }

  return(0);
}