Actual source code: cusparsematimpl.h
#if !defined(CUSPARSEMATIMPL)
#define CUSPARSEMATIMPL

#include <petscpkg_version.h>
#include <petsc/private/cudavecimpl.h>
#include <petscaijdevice.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>
#if (CUSPARSE_VER_MAJOR > 10 || CUSPARSE_VER_MAJOR == 10 && CUSPARSE_VER_MINOR >= 2) /* According to cuda/10.1.168 on OLCF Summit */
#define CHKERRCUSPARSE(stat) \
  do { \
    if (PetscUnlikely(stat)) { \
      const char *name  = cusparseGetErrorName(stat); \
      const char *descr = cusparseGetErrorString(stat); \
      if ((stat == CUSPARSE_STATUS_NOT_INITIALIZED) || (stat == CUSPARSE_STATUS_ALLOC_FAILED)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_GPU_RESOURCE,"cuSPARSE errorcode %d (%s) : %s. Reports not initialized or alloc failed; this indicates the GPU has run out of resources",(int)stat,name,descr); \
      else SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_GPU,"cuSPARSE errorcode %d (%s) : %s",(int)stat,name,descr); \
    } \
  } while (0)
#else
#define CHKERRCUSPARSE(stat) do {if (PetscUnlikely(stat)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_GPU,"cuSPARSE errorcode %d",(int)stat);} while (0)
#endif
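/* Usage sketch (illustrative only, not part of PETSc): CHKERRCUSPARSE wraps any call that
   returns a cusparseStatus_t and converts a failure into a PETSc error. The helper name
   below is hypothetical. */
PETSC_STATIC_INLINE PetscErrorCode CusparseHandleCreate_Example(cusparseHandle_t *handle)
{
  cusparseStatus_t stat;

  PetscFunctionBegin;
  stat = cusparseCreate(handle);CHKERRCUSPARSE(stat);
  PetscFunctionReturn(0);
}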
#define PetscStackCallThrust(body) \
  do { \
    try { \
      body; \
    } catch(thrust::system_error& e) { \
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Error in Thrust %s",e.what()); \
    } \
  } while (0)
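/* Usage sketch (illustrative only): PetscStackCallThrust turns a Thrust exception into a
   PETSc error. The hypothetical helper below simply fills a device vector with 0..n-1. */
PETSC_STATIC_INLINE PetscErrorCode ThrustSequence_Example(thrust::device_vector<int> &v)
{
  PetscFunctionBegin;
  PetscStackCallThrust(thrust::sequence(v.begin(),v.end()));
  PetscFunctionReturn(0);
}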
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#endif
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
#define cusparse_create_analysis_info  cusparseCreateCsrsv2Info
#define cusparse_destroy_analysis_info cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_get_svbuffsize(a,b,c,d,e,f,g,h,i,j) cusparseCcsrsv2_bufferSize(a,b,c,d,e,(cuComplex*)(f),g,h,i,j)
#define cusparse_analysis(a,b,c,d,e,f,g,h,i,j,k) cusparseCcsrsv2_analysis(a,b,c,d,e,(const cuComplex*)(f),g,h,i,j,k)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusparseCcsrsv2_solve(a,b,c,d,(const cuComplex*)(e),f,(const cuComplex*)(g),h,i,j,(const cuComplex*)(k),(cuComplex*)(l),m,n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_get_svbuffsize(a,b,c,d,e,f,g,h,i,j) cusparseZcsrsv2_bufferSize(a,b,c,d,e,(cuDoubleComplex*)(f),g,h,i,j)
#define cusparse_analysis(a,b,c,d,e,f,g,h,i,j,k) cusparseZcsrsv2_analysis(a,b,c,d,e,(const cuDoubleComplex*)(f),g,h,i,j,k)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusparseZcsrsv2_solve(a,b,c,d,(const cuDoubleComplex*)(e),f,(const cuDoubleComplex*)(g),h,i,j,(const cuDoubleComplex*)(k),(cuDoubleComplex*)(l),m,n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_get_svbuffsize cusparseScsrsv2_bufferSize
#define cusparse_analysis       cusparseScsrsv2_analysis
#define cusparse_solve          cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_get_svbuffsize cusparseDcsrsv2_bufferSize
#define cusparse_analysis       cusparseDcsrsv2_analysis
#define cusparse_solve          cusparseDcsrsv2_solve
#endif
#endif
#else
#define cusparse_create_analysis_info  cusparseCreateSolveAnalysisInfo
#define cusparse_destroy_analysis_info cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k) cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i) cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k) cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i) cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#endif
#endif
#endif
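/* Illustrative sketch (hypothetical helper, CUDA >= 9 code path only, not part of PETSc):
   the bufferSize/analysis/solve sequence that the cusparse_* aliases above abstract, shown
   for a lower-triangular CSR factor given by raw device arrays (a,i,j). Error handling is
   reduced to CHKERRCUSPARSE/CHKERRCUDA. */
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
PETSC_STATIC_INLINE PetscErrorCode CsrTriSolve_Example(cusparseHandle_t handle,int m,int nnz,cusparseMatDescr_t descr,PetscScalar *a,int *i,int *j,const PetscScalar *b,PetscScalar *x)
{
  csrsv2Info_t      info;
  int               bufferSize;
  void              *buffer;
  cusparseStatus_t  stat;
  cudaError_t       cerr;
  const PetscScalar one = 1.0;

  PetscFunctionBegin;
  stat = cusparse_create_analysis_info(&info);CHKERRCUSPARSE(stat);
  stat = cusparse_get_svbuffsize(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,nnz,descr,a,i,j,info,&bufferSize);CHKERRCUSPARSE(stat);
  cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
  stat = cusparse_analysis(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,nnz,descr,a,i,j,info,CUSPARSE_SOLVE_POLICY_USE_LEVEL,buffer);CHKERRCUSPARSE(stat);
  stat = cusparse_solve(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,m,nnz,&one,descr,a,i,j,info,b,x,CUSPARSE_SOLVE_POLICY_USE_LEVEL,buffer);CHKERRCUSPARSE(stat);
  cerr = cudaFree(buffer);CHKERRCUDA(cerr);
  stat = cusparse_destroy_analysis_info(info);CHKERRCUSPARSE(stat);
  PetscFunctionReturn(0);
}
#endif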
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseCcsrgeam2(a,b,c,(cuComplex*)d,e,f,(cuComplex*)g,h,i,(cuComplex*)j,k,l,(cuComplex*)m,n,o,p,(cuComplex*)q,r,s,t)
#define cusparse_csr_spgeam_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseCcsrgeam2_bufferSizeExt(a,b,c,(cuComplex*)d,e,f,(cuComplex*)g,h,i,(cuComplex*)j,k,l,(cuComplex*)m,n,o,p,(cuComplex*)q,r,s,t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseZcsrgeam2(a,b,c,(cuDoubleComplex*)d,e,f,(cuDoubleComplex*)g,h,i,(cuDoubleComplex*)j,k,l,(cuDoubleComplex*)m,n,o,p,(cuDoubleComplex*)q,r,s,t)
#define cusparse_csr_spgeam_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseZcsrgeam2_bufferSizeExt(a,b,c,(cuDoubleComplex*)d,e,f,(cuDoubleComplex*)g,h,i,(cuDoubleComplex*)j,k,l,(cuDoubleComplex*)m,n,o,p,(cuDoubleComplex*)q,r,s,t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_R_32F
#define cusparse_csr_spgeam            cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_R_64F
#define cusparse_csr_spgeam            cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseCcsrmm((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(j),(k),(cuComplex*)(l),(m),(cuComplex*)(n),(cuComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l) cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h) cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j) cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f) cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
#define cusparse_csr_spgemm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseCcsrgemm(a,b,c,d,e,f,g,h,(cuComplex*)i,j,k,l,m,(cuComplex*)n,o,p,q,(cuComplex*)r,s,t)
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s) cusparseCcsrgeam(a,b,c,(cuComplex*)d,e,f,(cuComplex*)g,h,i,(cuComplex*)j,k,l,(cuComplex*)m,n,o,p,(cuComplex*)q,r,s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseZcsrmm((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(j),(k),(cuDoubleComplex*)(l),(m),(cuDoubleComplex*)(n),(cuDoubleComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l) cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h) cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j) cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f) cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#define cusparse_csr_spgemm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t) cusparseZcsrgemm(a,b,c,d,e,f,g,h,(cuDoubleComplex*)i,j,k,l,m,(cuDoubleComplex*)n,o,p,q,(cuDoubleComplex*)r,s,t)
#define cusparse_csr_spgeam(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s) cusparseZcsrgeam(a,b,c,(cuDoubleComplex*)d,e,f,(cuDoubleComplex*)g,h,i,(cuDoubleComplex*)j,k,l,(cuDoubleComplex*)m,n,o,p,(cuDoubleComplex*)q,r,s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv   cusparseScsrmv
#define cusparse_csr_spmm   cusparseScsrmm
#define cusparse_csr2csc    cusparseScsr2csc
#define cusparse_hyb_spmv   cusparseShybmv
#define cusparse_csr2hyb    cusparseScsr2hyb
#define cusparse_hyb2csr    cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv   cusparseDcsrmv
#define cusparse_csr_spmm   cusparseDcsrmm
#define cusparse_csr2csc    cusparseDcsr2csc
#define cusparse_hyb_spmv   cusparseDhybmv
#define cusparse_csr2hyb    cusparseDcsr2hyb
#define cusparse_hyb2csr    cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY   thrust::device_vector<PetscInt>
#define THRUSTARRAY      thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
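/* Illustrative sketch (hypothetical helper, not part of PETSc): populating a CsrMatrix from
   host CSR arrays; the thrust assign() calls perform the host-to-device copies. */
PETSC_STATIC_INLINE void CsrMatrixSetFromHost_Example(PetscInt m,PetscInt n,PetscInt nnz,const int *ai,const int *aj,const PetscScalar *aa,CsrMatrix *C)
{
  C->num_rows       = m;
  C->num_cols       = n;
  C->num_entries    = nnz;
  C->row_offsets    = new THRUSTINTARRAY32(m+1);
  C->column_indices = new THRUSTINTARRAY32(nnz);
  C->values         = new THRUSTARRAY(nnz);
  C->row_offsets->assign(ai,ai+m+1);
  C->column_indices->assign(aj,aj+nnz);
  C->values->assign(aa,aa+nnz);
}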
/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
  csrsv2Info_t                solveInfo;
#else
  cusparseSolveAnalysisInfo_t solveInfo;
#endif
  cusparseSolvePolicy_t       solvePolicy;       /* whether level information is generated and used */
  int                         solveBufferSize;
  void                        *solveBuffer;
  size_t                      csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void                        *csr2cscBuffer;
  PetscScalar                 *AA_h;             /* managed host buffer for moving values to the GPU */
};
/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any row reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any column reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  PetscScalar                       *a_band_d;                /* GPU data for the banded CSR LU factorization with diag(L) = 1 */
  int                               *i_band_d;                /* this could be optimized away */
  cudaDeviceProp                    dev_prop;
  PetscBool                         init_dev_prop;
};
struct Mat_CusparseSpMV {
  PetscBool initialized;    /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t    spmvBufferSize; /* since it is not clear whether spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void      *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0) /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr,vecYDescr; /* descriptors for the dense vectors in y = op(A)x */
#endif
};
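/* Illustrative sketch (hypothetical helper): creating the dense-vector descriptors required by
   the generic cusparseSpMV() API on CUDA >= 11, guarded by the initialized flag above. Only the
   descriptors are shown; the real code also sizes spmvBuffer before marking the struct initialized. */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PETSC_STATIC_INLINE PetscErrorCode CusparseSpMVSetUpVecDescr_Example(Mat_CusparseSpMV *s,PetscInt n,PetscScalar *x,PetscScalar *y)
{
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (!s->initialized) {
    stat = cusparseCreateDnVec(&s->vecXDescr,n,x,cusparse_scalartype);CHKERRCUSPARSE(stat);
    stat = cusparseCreateDnVec(&s->vecYDescr,n,y,cusparse_scalartype);CHKERRCUSPARSE(stat);
    s->initialized = PETSC_TRUE;
  }
  PetscFunctionReturn(0);
}
#endif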
/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha_one;    /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseSpMatDescr_t matDescr;    /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV     cuSpMV[3];   /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL) {
    for (int i=0; i<3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
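/* Illustrative sketch (hypothetical helper): allocating the device-resident alpha/beta "scalars"
   referenced above and filling them from the PETSC_CUSPARSE_ONE/ZERO constants defined earlier. */
PETSC_STATIC_INLINE PetscErrorCode MultStructSetUpScalars_Example(Mat_SeqAIJCUSPARSEMultStruct *s)
{
  cudaError_t cerr;

  PetscFunctionBegin;
  cerr = cudaMalloc((void**)&s->alpha_one,sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = cudaMalloc((void**)&s->beta_zero,sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = cudaMalloc((void**)&s->beta_one, sizeof(PetscScalar));CHKERRCUDA(cerr);
  cerr = cudaMemcpy(s->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  cerr = cudaMemcpy(s->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  cerr = cudaMemcpy(s->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  PetscFunctionReturn(0);
}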
/* This is a larger struct holding all the matrices for a SpMV, and SpMV Transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt                     nrows;           /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat     format;          /* the storage format for the matrix on the device */
  cudaStream_t                 stream;          /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;          /* a handle to the cusparse library ... this may not be owned (if we're working in parallel i.e. multiGPUs) */
  PetscObjectState             nonzerostate;    /* track nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  size_t                       csr2cscBufferSize; /* stuff used to compute the matTranspose above */
  void                         *csr2cscBuffer;    /* This is used as a C struct and is calloc'ed by PetscNewLog() */
  cusparseCsr2CscAlg_t         csr2cscAlg;        /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t            spmvAlg;
  cusparseSpMMAlg_t            spmmAlg;
#endif
  THRUSTINTARRAY               *csr2csc_i;
  PetscSplitCSRDataStructure   deviceMat;       /* matrix on device for, e.g., assembly */
  THRUSTINTARRAY               *cooPerm;        /* permutation array that sorts the input COO entries by row and column */
  THRUSTINTARRAY               *cooPerm_a;      /* ordered array indicating that the i-th nonzero (after sorting) is the j-th unique nonzero */
};
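/* Illustrative sketch (hypothetical helper): the format field above is what the public
   MatCUSPARSESetFormat() interface, or -mat_cusparse_storage_format on the command line,
   ultimately controls; note that the ELL/HYB choices rely on cuSPARSE hyb routines that are
   only available on the pre-CUDA-11 code paths above. */
PETSC_STATIC_INLINE PetscErrorCode UseEllStorage_Example(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,MAT_CUSPARSE_ELL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}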
PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat,Mat,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p*);
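/* Illustrative sketch of the COO assembly path that the two internal COO routines above back:
   user code goes through the public MatSetPreallocationCOO()/MatSetValuesCOO() interface. The
   matrix A is assumed to have already been created as a 2x2 MATSEQAIJCUSPARSE; the 3-entry
   pattern below is hypothetical. */
PETSC_STATIC_INLINE PetscErrorCode AssembleCOO_Example(Mat A)
{
  PetscErrorCode    ierr;
  const PetscInt    coo_i[] = {0,0,1},coo_j[] = {0,1,1};
  const PetscScalar coo_v[] = {1.0,2.0,3.0};

  PetscFunctionBegin;
  ierr = MatSetPreallocationCOO(A,3,coo_i,coo_j);CHKERRQ(ierr);
  ierr = MatSetValuesCOO(A,coo_v,INSERT_VALUES);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}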
PETSC_STATIC_INLINE bool isCudaMem(const void *data)
{
  cudaError_t                  cerr;
  struct cudaPointerAttributes attr;
  enum cudaMemoryType          mtype;

  cerr = cudaPointerGetAttributes(&attr,data); /* Do not check error since before CUDA 11.0, passing a host pointer returns cudaErrorInvalidValue */
  cudaGetLastError();                          /* Reset the last error */
#if (CUDART_VERSION < 10000)
  mtype = attr.memoryType;
#else
  mtype = attr.type;
#endif
  if (cerr == cudaSuccess && mtype == cudaMemoryTypeDevice) return true;
  else return false;
}
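/* Illustrative sketch (hypothetical helper): isCudaMem() lets a caller decide whether a raw
   user pointer can be handed to GPU kernels directly or must first be copied to the device. */
PETSC_STATIC_INLINE PetscErrorCode CopyToDeviceIfNeeded_Example(const PetscScalar *x,size_t n,PetscScalar *x_d)
{
  cudaError_t cerr;

  PetscFunctionBegin;
  if (isCudaMem(x)) {
    cerr = cudaMemcpy(x_d,x,n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr);
  } else {
    cerr = cudaMemcpy(x_d,x,n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
  }
  PetscFunctionReturn(0);
}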
#endif