Actual source code: cusparsematimpl.h
#pragma once

#include <petscpkg_version.h>
#include <../src/vec/vec/impls/seq/cupm/vecseqcupm.hpp>
#include <../src/sys/objects/device/impls/cupm/cupmthrustutility.hpp>
#include <petsc/private/petsclegacycupmblas.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsrilu02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsric02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsrilu02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsric02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrilu02_bufferSize cusparseScsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseScsrilu02_analysis
#define cusparseXcsrilu02 cusparseScsrilu02
#define cusparseXcsric02_bufferSize cusparseScsric02_bufferSize
#define cusparseXcsric02_analysis cusparseScsric02_analysis
#define cusparseXcsric02 cusparseScsric02
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrilu02_bufferSize cusparseDcsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseDcsrilu02_analysis
#define cusparseXcsrilu02 cusparseDcsrilu02
#define cusparseXcsric02_bufferSize cusparseDcsric02_bufferSize
#define cusparseXcsric02_analysis cusparseDcsric02_analysis
#define cusparseXcsric02 cusparseDcsric02
#endif
#endif
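
/* The cusparseXcsrilu02 / cusparseXcsric02 wrappers above let factorization code be written once
   in terms of PetscScalar while the macro selects the S/D/C/Z cuSPARSE entry point and inserts the
   complex casts. A minimal sketch of the intended buffer-size/analysis/factor sequence follows; the
   variables (handle, m, nnz, descr, a, i, j, info, policy) are illustrative and not part of this header:

     int   bufferSize = 0;
     void *buffer     = NULL;
     PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(handle, m, nnz, descr, a, i, j, info, &bufferSize));
     PetscCallCUDA(cudaMalloc(&buffer, bufferSize));
     PetscCallCUSPARSE(cusparseXcsrilu02_analysis(handle, m, nnz, descr, a, i, j, info, policy, buffer));
     PetscCallCUSPARSE(cusparseXcsrilu02(handle, m, nnz, descr, a, i, j, info, policy, buffer));
*/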

#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
#define csrsvInfo_t csrsv2Info_t
#define cusparseCreateCsrsvInfo cusparseCreateCsrsv2Info
#define cusparseDestroyCsrsvInfo cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseCcsrsv2_bufferSize(a, b, c, d, e, (cuComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseCcsrsv2_analysis(a, b, c, d, e, (const cuComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseCcsrsv2_solve(a, b, c, d, (const cuComplex *)(e), f, (const cuComplex *)(g), h, i, j, (const cuComplex *)(k), (cuComplex *)(l), m, n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseZcsrsv2_bufferSize(a, b, c, d, e, (cuDoubleComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseZcsrsv2_analysis(a, b, c, d, e, (const cuDoubleComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseZcsrsv2_solve(a, b, c, d, (const cuDoubleComplex *)(e), f, (const cuDoubleComplex *)(g), h, i, j, (const cuDoubleComplex *)(k), (cuDoubleComplex *)(l), m, n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize cusparseScsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseScsrsv2_analysis
#define cusparseXcsrsv_solve cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize cusparseDcsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseDcsrsv2_analysis
#define cusparseXcsrsv_solve cusparseDcsrsv2_solve
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
#define csrsvInfo_t cusparseSolveAnalysisInfo_t
#define cusparseCreateCsrsvInfo cusparseCreateSolveAnalysisInfo
#define cusparseDestroyCsrsvInfo cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) cusparseCcsrsv_solve((a), (b), (c), (cuComplex *)(e), (f), (cuComplex *)(g), (h), (i), (j), (cuComplex *)(k), (cuComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseCcsrsv_analysis((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (h), (i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) \
  cusparseZcsrsv_solve((a), (b), (c), (cuDoubleComplex *)(e), (f), (cuDoubleComplex *)(g), (h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseZcsrsv_analysis((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (h), (i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve cusparseScsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseScsrsv_analysis(a, b, c, d, e, f, g, h, i)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve cusparseDcsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseDcsrsv_analysis(a, b, c, d, e, f, g, h, i)
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
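
/* Likewise, the cusparseXcsrsv_* wrappers hide both the precision suffix and the csrsv (CUDA < 9)
   versus csrsv2 (CUDA >= 9) API split. On the csrsv2 path the expected sequence is a buffer-size
   query, analysis, then one or more solves. A minimal sketch with illustrative variable names
   (handle, n, nnz, descr, a, i, j, info, policy, x, y, buffer are not part of this header):

     int bufferSize = 0;
     PetscCallCUSPARSE(cusparseXcsrsv_buffsize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, descr, a, i, j, info, &bufferSize));
     PetscCallCUDA(cudaMalloc(&buffer, bufferSize));
     PetscCallCUSPARSE(cusparseXcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, descr, a, i, j, info, policy, buffer));
     PetscCallCUSPARSE(cusparseXcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &PETSC_CUSPARSE_ONE, descr, a, i, j, info, x, y, policy, buffer));
*/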

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgeam2(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseCcsrgeam2_bufferSizeExt(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2_bufferSizeExt(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_R_32F
#define cusparse_csr_spgeam cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_R_64F
#define cusparse_csr_spgeam cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseCcsrmv((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (cuComplex *)(h), (i), (j), (cuComplex *)(k), (cuComplex *)(l), (cuComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) cusparseCcsrmm((a), (b), (c), (d), (e), (f), (cuComplex *)(g), (h), (cuComplex *)(i), (j), (k), (cuComplex *)(l), (m), (cuComplex *)(n), (cuComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseCcsr2csc((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (cuComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseChybmv((a), (b), (cuComplex *)(c), (d), (e), (cuComplex *)(f), (cuComplex *)(g), (cuComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseCcsr2hyb((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseChyb2csr((a), (b), (c), (cuComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgemm(a, b, c, d, e, f, g, h, (cuComplex *)i, j, k, l, m, (cuComplex *)n, o, p, q, (cuComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) cusparseCcsrgeam(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseZcsrmv((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (cuDoubleComplex *)(h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l), (cuDoubleComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
  cusparseZcsrmm((a), (b), (c), (d), (e), (f), (cuDoubleComplex *)(g), (h), (cuDoubleComplex *)(i), (j), (k), (cuDoubleComplex *)(l), (m), (cuDoubleComplex *)(n), (cuDoubleComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseZcsr2csc((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (cuDoubleComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseZhybmv((a), (b), (cuDoubleComplex *)(c), (d), (e), (cuDoubleComplex *)(f), (cuDoubleComplex *)(g), (cuDoubleComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseZcsr2hyb((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseZhyb2csr((a), (b), (c), (cuDoubleComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseZcsrgemm(a, b, c, d, e, f, g, h, (cuDoubleComplex *)i, j, k, l, m, (cuDoubleComplex *)n, o, p, q, (cuDoubleComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) \
  cusparseZcsrgeam(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb cusparseScsr2hyb
#define cusparse_hyb2csr cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb cusparseDcsr2hyb
#define cusparse_hyb2csr cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt num_rows;
  PetscInt num_cols;
  PetscInt num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY *values;
};
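
/* CsrMatrix owns its index and value storage through thrust device vectors, so a matrix (e.g. a
   triangular factor) can be uploaded by constructing those vectors from host iterators. A minimal
   sketch, assuming host CSR arrays ai (length m+1), aj and aa (length nnz) that are not part of
   this header:

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nnz;
     csr->row_offsets    = new THRUSTINTARRAY32(ai, ai + m + 1); // copies host -> device
     csr->column_indices = new THRUSTINTARRAY32(aj, aj + nnz);
     csr->values         = new THRUSTARRAY(aa, aa + nnz);
*/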

/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t descr;
  cusparseOperation_t solveOp;
  CsrMatrix *csrMat;
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
  csrsvInfo_t solveInfo;
  cusparseSolvePolicy_t solvePolicy; /* whether level information is generated and used */
#endif
  int solveBufferSize;
  void *solveBuffer;
  size_t csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void *csr2cscBuffer;
  PetscScalar *AA_h; /* managed host buffer for moving values to the GPU */
};

/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
#endif

  THRUSTINTARRAY *rpermIndices; /* indices used for any reordering */
  THRUSTINTARRAY *cpermIndices; /* indices used for any reordering */
  THRUSTARRAY *workVector;
  cusparseHandle_t handle; /* a handle to the cusparse library */
  PetscInt nnz;            /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  cudaDeviceProp dev_prop;
  PetscBool init_dev_prop;

  PetscBool factorizeOnDevice; /* Do factorization on device or not */
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  /* csrilu0/csric0 appeared in cusparse-8.0, but we use them along with cusparseSpSV,
     which first appeared in cusparse-11.5 with cuda-11.3.
  */
  PetscScalar *csrVal, *diag;     // the diagonal D in UtDU of Cholesky
  int *csrRowPtr32, *csrColIdx32; // i,j of M. cusparseScsrilu02/ic02() etc require 32-bit indices

  PetscInt *csrRowPtr, *csrColIdx; // i, j of M on device for CUDA APIs that support 64-bit indices
  PetscScalar *csrVal_h, *diag_h;  // Since LU is done on host, we prepare a factored matrix in regular csr format on host and then copy it to device
  PetscInt *csrRowPtr_h;           // csrColIdx_h is temporary, so it is not here

  /* Mixed mat descriptor types? yes, different cusparse APIs use different types */
  cusparseMatDescr_t matDescr_M;
  cusparseSpMatDescr_t spMatDescr_L, spMatDescr_U;
  cusparseSpSVDescr_t spsvDescr_L, spsvDescr_Lt, spsvDescr_U, spsvDescr_Ut;

  cusparseDnVecDescr_t dnVecDescr_X, dnVecDescr_Y;
  PetscScalar *X, *Y; /* data array of dnVec X and Y */

  /* Mixed size types? yes, CUDA-11.7.0 declared cusparseDcsrilu02_bufferSizeExt() that returns size_t but did not implement it! */
  int factBufferSize_M; /* M ~= LU or LLt */
  size_t spsvBufferSize_L, spsvBufferSize_Lt, spsvBufferSize_U, spsvBufferSize_Ut;
  /* cusparse needs various buffers for factorization and solve of L, U, Lt, or Ut.
     To save memory, we share the factorization buffer with one of spsvBuffer_L/U.
  */
  void *factBuffer_M, *spsvBuffer_L, *spsvBuffer_U, *spsvBuffer_Lt, *spsvBuffer_Ut;

  csrilu02Info_t ilu0Info_M;
  csric02Info_t ic0Info_M;
  int structural_zero, numerical_zero;
  cusparseSolvePolicy_t policy_M;

  /* In MatSolveTranspose() for ILU0, we use the two flags to do on-demand solve */
  PetscBool createdTransposeSpSVDescr;    /* Have we created SpSV descriptors for Lt, Ut? */
  PetscBool updatedTransposeSpSVAnalysis; /* Have we updated SpSV analysis with the latest L, U values? */

  PetscLogDouble numericFactFlops; /* Estimated FLOPs in ILU0/ICC0 numeric factorization */
#endif
};

struct Mat_CusparseSpMV {
  PetscBool initialized; /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t spmvBufferSize; /* since I'm not sure if spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)      /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr, vecYDescr; /* descriptor for the dense vectors in y=op(A)x */
#endif
};

/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void *mat;                     /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;      /* Data needed to describe the matrix for a multiply */
  THRUSTINTARRAY *cprowIndices;  /* compressed row indices used in the parallel SpMV */
  PetscScalar *alpha_one;        /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar *beta_zero;        /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar *beta_one;         /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matDescr; /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV cuSpMV[3];    /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL)
  {
    for (int i = 0; i < 3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
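
/* With CUDA >= 11, matDescr plus one Mat_CusparseSpMV slot per operation feed the generic
   cusparseSpMV() API. A minimal sketch of the call pattern, assuming a populated struct `mult`,
   a cusparse handle, and dense-vector descriptors already attached to device data (the names
   and the default algorithm choice are illustrative, not part of this header):

     Mat_CusparseSpMV *spmv = &mult->cuSpMV[CUSPARSE_OPERATION_NON_TRANSPOSE];
     if (!spmv->initialized) {
       PetscCallCUSPARSE(cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mult->alpha_one, mult->matDescr, spmv->vecXDescr, mult->beta_zero, spmv->vecYDescr,
                                                 cusparse_scalartype, CUSPARSE_SPMV_ALG_DEFAULT, &spmv->spmvBufferSize));
       PetscCallCUDA(cudaMalloc(&spmv->spmvBuffer, spmv->spmvBufferSize));
       spmv->initialized = PETSC_TRUE;
     }
     PetscCallCUSPARSE(cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mult->alpha_one, mult->matDescr, spmv->vecXDescr, mult->beta_zero, spmv->vecYDescr,
                                    cusparse_scalartype, CUSPARSE_SPMV_ALG_DEFAULT, spmv->spmvBuffer));
*/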

/* This is a larger struct holding all the matrices for an SpMV and an SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;          /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY *workVector;                    /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32 *rowoffsets_gpu;           /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt nrows;                             /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat format;            /* the storage format for the matrix on the device */
  PetscBool use_cpu_solve;                    /* Use AIJ_Seq (I)LU solve */
  cudaStream_t stream;                        /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t handle;                    /* a handle to the cusparse library ... this may not be owned (if we're working in parallel i.e. multiGPUs) */
  PetscObjectState nonzerostate;              /* track nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  size_t csr2cscBufferSize;                   /* stuff used to compute the matTranspose above */
  void *csr2cscBuffer;                        /* This is used as a C struct and is calloc'ed by PetscNew() */
  cusparseCsr2CscAlg_t csr2cscAlg;            /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t spmvAlg;
  cusparseSpMMAlg_t spmmAlg;
#endif
  THRUSTINTARRAY *csr2csc_i;
  THRUSTINTARRAY *coords; /* permutation array used in MatSeqAIJCUSPARSEMergeMats */
};

typedef struct Mat_SeqAIJCUSPARSETriFactors *Mat_SeqAIJCUSPARSETriFactors_p;

PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat, Mat, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *);

using VecSeq_CUDA = Petsc::vec::cupm::impl::VecSeq_CUPM<Petsc::device::cupm::DeviceType::CUDA>;

static inline bool isCudaMem(const void *data)
{
  using namespace Petsc::device::cupm;
  auto mtype = PETSC_MEMTYPE_HOST;

  PetscFunctionBegin;
  PetscCallAbort(PETSC_COMM_SELF, impl::Interface<DeviceType::CUDA>::PetscCUPMGetMemType(data, &mtype));
  PetscFunctionReturn(PetscMemTypeDevice(mtype));
}
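
/* isCudaMem() is a small convenience wrapper around the CUPM memtype query: it aborts on error and
   returns true only when the pointer refers to device memory. An illustrative use (the pointer x is
   hypothetical, not part of this header):

     if (isCudaMem(x)) {
       // x already lives on the GPU; it can be handed straight to cuSPARSE
     } else {
       // x is host memory; copy it to the device first
     }
*/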