Actual source code: cusparsematimpl.h
#include <petscpkg_version.h>
#include <petsc/private/cudavecimpl.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>
#if (CUSPARSE_VER_MAJOR > 10 || CUSPARSE_VER_MAJOR == 10 && CUSPARSE_VER_MINOR >= 2) /* According to cuda/10.1.168 on OLCF Summit */
#define CHKERRCUSPARSE(stat) \
  do { \
    if (PetscUnlikely(stat)) { \
      const char *name  = cusparseGetErrorName(stat); \
      const char *descr = cusparseGetErrorString(stat); \
      if ((stat == CUSPARSE_STATUS_NOT_INITIALIZED) || (stat == CUSPARSE_STATUS_ALLOC_FAILED)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_GPU_RESOURCE,"cuSPARSE error %d (%s) : %s. Reports not initialized or alloc failed; this indicates the GPU has run out of resources",(int)stat,name,descr); \
      else SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_GPU,"cuSPARSE error %d (%s) : %s",(int)stat,name,descr); \
    } \
  } while (0)
#else
#define CHKERRCUSPARSE(stat) do {if (PetscUnlikely(stat)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_GPU,"cuSPARSE error %d",(int)stat);} while (0)
#endif
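
/* Usage sketch (illustrative, not part of the original header): every cuSPARSE
   call is wrapped so that a failing status becomes a PETSc error, e.g.

     cusparseStatus_t stat;
     stat = cusparseCreate(&handle);CHKERRCUSPARSE(stat);
     stat = cusparseSetStream(handle,stream);CHKERRCUSPARSE(stat);
*/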
#define PetscStackCallThrust(body) \
  do { \
    try { \
      body; \
    } catch(thrust::system_error& e) { \
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error in Thrust %s",e.what()); \
    } \
  } while (0)
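
/* Usage sketch (illustrative): Thrust reports failures via C++ exceptions,
   which this macro converts into PETSc errors; `perm` is a hypothetical
   THRUSTINTARRAY*, e.g.

     PetscStackCallThrust(thrust::sequence(perm->begin(),perm->end(),0));
*/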
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#endif
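
/* Usage sketch (illustrative): with the handle in CUSPARSE_POINTER_MODE_HOST,
   these unit/zero scalars can be passed by address as the alpha/beta arguments
   of the legacy calls mapped later in this header; all other variable names
   are hypothetical, e.g.

     cusparse_csr_spmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,n,nnz,
                       &PETSC_CUSPARSE_ONE,descr,vals,rowPtr,colIdx,
                       x,&PETSC_CUSPARSE_ZERO,y);
*/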
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
#define cusparse_create_analysis_info  cusparseCreateCsrsv2Info
#define cusparse_destroy_analysis_info cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_get_svbuffsize(a,b,c,d,e,f,g,h,i,j) cusparseCcsrsv2_bufferSize(a,b,c,d,e,(cuComplex*)(f),g,h,i,j)
#define cusparse_analysis(a,b,c,d,e,f,g,h,i,j,k) cusparseCcsrsv2_analysis(a,b,c,d,e,(const cuComplex*)(f),g,h,i,j,k)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusparseCcsrsv2_solve(a,b,c,d,(const cuComplex*)(e),f,(const cuComplex*)(g),h,i,j,(const cuComplex*)(k),(cuComplex*)(l),m,n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_get_svbuffsize(a,b,c,d,e,f,g,h,i,j) cusparseZcsrsv2_bufferSize(a,b,c,d,e,(cuDoubleComplex*)(f),g,h,i,j)
#define cusparse_analysis(a,b,c,d,e,f,g,h,i,j,k) cusparseZcsrsv2_analysis(a,b,c,d,e,(const cuDoubleComplex*)(f),g,h,i,j,k)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusparseZcsrsv2_solve(a,b,c,d,(const cuDoubleComplex*)(e),f,(const cuDoubleComplex*)(g),h,i,j,(const cuDoubleComplex*)(k),(cuDoubleComplex*)(l),m,n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_get_svbuffsize cusparseScsrsv2_bufferSize
#define cusparse_analysis       cusparseScsrsv2_analysis
#define cusparse_solve          cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_get_svbuffsize cusparseDcsrsv2_bufferSize
#define cusparse_analysis       cusparseDcsrsv2_analysis
#define cusparse_solve          cusparseDcsrsv2_solve
#endif
#endif
#else
#define cusparse_create_analysis_info  cusparseCreateSolveAnalysisInfo
#define cusparse_destroy_analysis_info cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k) cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i) cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k) cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i) cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#endif
#endif
#endif
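
/* Usage sketch (illustrative): for CUDA >= 9 these macros map onto the csrsv2
   API, whose call sequence is: create the info object, query the workspace
   size, run the analysis once, then solve as often as needed. Variable names
   here are hypothetical:

     csrsv2Info_t info;
     int          bufferSize;
     void         *buffer;
     stat = cusparse_create_analysis_info(&info);CHKERRCUSPARSE(stat);
     stat = cusparse_get_svbuffsize(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,nnz,
                                    descr,vals,rowPtr,colIdx,info,&bufferSize);CHKERRCUSPARSE(stat);
     cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
     stat = cusparse_analysis(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,nnz,descr,
                              vals,rowPtr,colIdx,info,
                              CUSPARSE_SOLVE_POLICY_USE_LEVEL,buffer);CHKERRCUSPARSE(stat);
     stat = cusparse_solve(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,nnz,&alpha,descr,
                           vals,rowPtr,colIdx,info,x,y,
                           CUSPARSE_SOLVE_POLICY_USE_LEVEL,buffer);CHKERRCUSPARSE(stat);
*/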
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseCcsrgeam2(a,b,c,(cuComplex*)d,e,f,(cuComplex*)g,h,i,(cuComplex*)j,k,l,(cuComplex*)m,n,o,p,(cuComplex*)q,r,s,t)
#define cusparse_csr_spgeam_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseCcsrgeam2_bufferSizeExt(a,b,c,(cuComplex*)d,e,f,(cuComplex*)g,h,i,(cuComplex*)j,k,l,(cuComplex*)m,n,o,p,(cuComplex*)q,r,s,t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseZcsrgeam2(a,b,c,(cuDoubleComplex*)d,e,f,(cuDoubleComplex*)g,h,i,(cuDoubleComplex*)j,k,l,(cuDoubleComplex*)m,n,o,p,(cuDoubleComplex*)q,r,s,t)
#define cusparse_csr_spgeam_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseZcsrgeam2_bufferSizeExt(a,b,c,(cuDoubleComplex*)d,e,f,(cuDoubleComplex*)g,h,i,(cuDoubleComplex*)j,k,l,(cuDoubleComplex*)m,n,o,p,(cuDoubleComplex*)q,r,s,t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_R_32F
#define cusparse_csr_spgeam cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_R_64F
#define cusparse_csr_spgeam cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseCcsrmm((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(j),(k),(cuComplex*)(l),(m),(cuComplex*)(n),(cuComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l) cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h) cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j) cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f) cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
#define cusparse_csr_spgemm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseCcsrgemm(a,b,c,d,e,f,g,h,(cuComplex*)i,j,k,l,m,(cuComplex*)n,o,p,q,(cuComplex*)r,s,t)
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s) cusparseCcsrgeam(a,b,c,(cuComplex*)d,e,f,(cuComplex*)g,h,i,(cuComplex*)j,k,l,(cuComplex*)m,n,o,p,(cuComplex*)q,r,s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseZcsrmm((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(j),(k),(cuDoubleComplex*)(l),(m),(cuDoubleComplex*)(n),(cuDoubleComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l) cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h) cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j) cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f) cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#define cusparse_csr_spgemm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseZcsrgemm(a,b,c,d,e,f,g,h,(cuDoubleComplex*)i,j,k,l,m,(cuDoubleComplex*)n,o,p,q,(cuDoubleComplex*)r,s,t)
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s) cusparseZcsrgeam(a,b,c,(cuDoubleComplex*)d,e,f,(cuDoubleComplex*)g,h,i,(cuDoubleComplex*)j,k,l,(cuDoubleComplex*)m,n,o,p,(cuDoubleComplex*)q,r,s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv   cusparseScsrmv
#define cusparse_csr_spmm   cusparseScsrmm
#define cusparse_csr2csc    cusparseScsr2csc
#define cusparse_hyb_spmv   cusparseShybmv
#define cusparse_csr2hyb    cusparseScsr2hyb
#define cusparse_hyb2csr    cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv   cusparseDcsrmv
#define cusparse_csr_spmm   cusparseDcsrmm
#define cusparse_csr2csc    cusparseDcsr2csc
#define cusparse_hyb_spmv   cusparseDhybmv
#define cusparse_csr2hyb    cusparseDcsr2hyb
#define cusparse_hyb2csr    cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif
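
/* Usage sketch (illustrative): with CUDA >= 11, cusparse_csr2csc expands to
   cusparseCsr2cscEx2, which needs an explicit workspace sized by
   cusparseCsr2cscEx2_bufferSize; cusparse_scalartype supplies the matching
   cudaDataType. Variable names here are hypothetical:

     size_t bufferSize;
     void   *buffer;
     stat = cusparseCsr2cscEx2_bufferSize(handle,m,n,nnz,vals,rowPtr,colIdx,
                                          cscVals,cscColPtr,cscRowIdx,
                                          cusparse_scalartype,CUSPARSE_ACTION_NUMERIC,
                                          CUSPARSE_INDEX_BASE_ZERO,CUSPARSE_CSR2CSC_ALG1,
                                          &bufferSize);CHKERRCUSPARSE(stat);
     cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
     stat = cusparse_csr2csc(handle,m,n,nnz,vals,rowPtr,colIdx,
                             cscVals,cscColPtr,cscRowIdx,
                             cusparse_scalartype,CUSPARSE_ACTION_NUMERIC,
                             CUSPARSE_INDEX_BASE_ZERO,CUSPARSE_CSR2CSC_ALG1,
                             buffer);CHKERRCUSPARSE(stat);
*/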
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY   thrust::device_vector<PetscInt>
#define THRUSTARRAY      thrust::device_vector<PetscScalar>
/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
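
/* Usage sketch (illustrative): the Thrust containers are heap-allocated and
   sized to the matrix, e.g. when mirroring a host CSR matrix on the GPU:

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nnz;
     csr->row_offsets    = new THRUSTINTARRAY32(m+1);
     csr->column_indices = new THRUSTINTARRAY32(nnz);
     csr->values         = new THRUSTARRAY(nnz);
*/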
/* This struct holds the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t    descr;
  cusparseOperation_t   solveOp;
  CsrMatrix             *csrMat;
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
  csrsv2Info_t          solveInfo;
#else
  cusparseSolveAnalysisInfo_t solveInfo;
#endif
  cusparseSolvePolicy_t solvePolicy;       /* whether level information is generated and used */
  int                   solveBufferSize;
  void                  *solveBuffer;
  size_t                csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void                  *csr2cscBuffer;
  PetscScalar           *AA_h;             /* managed host buffer for moving values to the GPU */
};
/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cuSPARSE library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  PetscScalar                       *a_band_d;                /* GPU data for banded CSR LU factorization matrix diag(L)=1 */
  int                               *i_band_d;                /* this could be optimized away */
};
struct Mat_CusparseSpMV {
  PetscBool initialized;    /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t    spmvBufferSize; /* since I'm not sure if spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void      *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0) /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr,vecYDescr; /* descriptors for the dense vectors in y=op(A)x */
#endif
};
/* This struct holds the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha_one;    /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseSpMatDescr_t matDescr;    /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV     cuSpMV[3];   /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL) {
    for (int i=0; i<3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
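
/* Usage sketch (illustrative): with the handle in CUSPARSE_POINTER_MODE_DEVICE,
   the device "scalars" above can feed the CUDA >= 11 generic SpMV API directly;
   `matstruct` is a hypothetical Mat_SeqAIJCUSPARSEMultStruct*, and
   CUSPARSE_MV_ALG_DEFAULT is the CUDA 11.0 name of the default algorithm:

     stat = cusparseSpMV(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,
                         matstruct->alpha_one,matstruct->matDescr,
                         matstruct->cuSpMV[0].vecXDescr,
                         matstruct->beta_zero,matstruct->cuSpMV[0].vecYDescr,
                         cusparse_scalartype,CUSPARSE_MV_ALG_DEFAULT,
                         matstruct->cuSpMV[0].spmvBuffer);CHKERRCUSPARSE(stat);
*/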
/* This is a larger struct holding all the matrices for a SpMV, and SpMV Transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a workvector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt                     nrows;           /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat     format;          /* the storage format for the matrix on the device */
  cudaStream_t                 stream;          /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;          /* a handle to the cuSPARSE library ... this may not be owned (if we're working in parallel, i.e. multiGPUs) */
  PetscObjectState             nonzerostate;    /* track nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  size_t                       csr2cscBufferSize; /* stuff used to compute the matTranspose above */
  void                         *csr2cscBuffer;    /* This is used as a C struct and is calloc'ed by PetscNewLog() */
  cusparseCsr2CscAlg_t         csr2cscAlg;        /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t            spmvAlg;
  cusparseSpMMAlg_t            spmmAlg;
#endif
  THRUSTINTARRAY               *csr2csc_i;
  PetscSplitCSRDataStructure   *deviceMat;      /* matrix on device for, e.g., assembly */
  THRUSTINTARRAY               *cooPerm;
  THRUSTINTARRAY               *cooPerm_a;
};
PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat,const PetscScalar**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat,const PetscScalar**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat,PetscScalar**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat,PetscScalar**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat,PetscScalar**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat,PetscScalar**);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat,Mat,MatReuse,Mat*);
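
/* Usage sketch (illustrative): the array accessors follow PETSc's usual
   get/restore pairing; the Read variant exposes the device values without
   marking them as modified:

     const PetscScalar *a;
     PetscErrorCode    ierr;
     ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&a);CHKERRQ(ierr);
     ...  read the device values through a ...
     ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&a);CHKERRQ(ierr);
*/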
PETSC_STATIC_INLINE bool isCudaMem(const void *data)
{
  cudaError_t                  cerr;
  struct cudaPointerAttributes attr;
  enum cudaMemoryType          mtype;
  cerr = cudaPointerGetAttributes(&attr,data); /* Do not check error since before CUDA 11.0, passing a host pointer returns cudaErrorInvalidValue */
  cudaGetLastError(); /* Reset the last error */
#if (CUDART_VERSION < 10000)
  mtype = attr.memoryType;
#else
  mtype = attr.type;
#endif
  if (cerr == cudaSuccess && mtype == cudaMemoryTypeDevice) return true;
  else return false;
}

#endif