/* Actual source code: cusparsematimpl.h (petsc-3.14.6, 2021-03-30) */
#if !defined(__CUSPARSEMATIMPL)
#define __CUSPARSEMATIMPL

#include <petscpkg_version.h>
#include <petsc/private/cudavecimpl.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>

#if (CUSPARSE_VER_MAJOR > 10 || (CUSPARSE_VER_MAJOR == 10 && CUSPARSE_VER_MINOR >= 2)) /* According to cuda/10.1.168 on OLCF Summit */
#define CHKERRCUSPARSE(stat) \
  do { \
    if (PetscUnlikely(stat)) { \
      const char *name  = cusparseGetErrorName(stat); \
      const char *descr = cusparseGetErrorString(stat); \
      SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d (%s) : %s",(int)stat,name,descr); \
    } \
  } while (0)
#else
#define CHKERRCUSPARSE(stat) do {if (PetscUnlikely(stat)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d",(int)stat);} while (0)
#endif
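
/* Usage sketch (illustrative, not part of the original header): CHKERRCUSPARSE
   wraps any call returning a cusparseStatus_t inside a function that itself
   returns a PetscErrorCode, since SETERRQ returns an error code on failure, e.g.

     cusparseHandle_t handle;
     cusparseStatus_t stat;
     stat = cusparseCreate(&handle);CHKERRCUSPARSE(stat);
*/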

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#endif
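
/* Usage sketch (illustrative, not part of the original header): these host-side
   constants are convenient for seeding the device-resident alpha/beta "scalars"
   used by the multiply structs below (d_one is a hypothetical device pointer):

     PetscScalar *d_one;
     cudaError_t cerr;
     cerr = cudaMalloc((void**)&d_one,sizeof(PetscScalar));CHKERRCUDA(cerr);
     cerr = cudaMemcpy(d_one,&PETSC_CUSPARSE_ONE,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
*/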

#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
#define cusparse_create_analysis_info  cusparseCreateCsrsv2Info
#define cusparse_destroy_analysis_info cusparseDestroyCsrsv2Info
#define cusparse_csr2csc               cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_get_svbuffsize(a,b,c,d,e,f,g,h,i,j) cusparseCcsrsv2_bufferSize(a,b,c,d,e,(cuComplex*)(f),g,h,i,j)
#define cusparse_analysis(a,b,c,d,e,f,g,h,i,j,k)     cusparseCcsrsv2_analysis(a,b,c,d,e,(const cuComplex*)(f),g,h,i,j,k)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k,l,m,n)  cusparseCcsrsv2_solve(a,b,c,d,(const cuComplex*)(e),f,(const cuComplex*)(g),h,i,j,(const cuComplex*)(k),(cuComplex*)(l),m,n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_get_svbuffsize(a,b,c,d,e,f,g,h,i,j) cusparseZcsrsv2_bufferSize(a,b,c,d,e,(cuDoubleComplex*)(f),g,h,i,j)
#define cusparse_analysis(a,b,c,d,e,f,g,h,i,j,k)     cusparseZcsrsv2_analysis(a,b,c,d,e,(const cuDoubleComplex*)(f),g,h,i,j,k)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k,l,m,n)  cusparseZcsrsv2_solve(a,b,c,d,(const cuDoubleComplex*)(e),f,(const cuDoubleComplex*)(g),h,i,j,(const cuDoubleComplex*)(k),(cuDoubleComplex*)(l),m,n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype     CUDA_R_32F
#define cusparse_get_svbuffsize cusparseScsrsv2_bufferSize
#define cusparse_analysis       cusparseScsrsv2_analysis
#define cusparse_solve          cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype     CUDA_R_64F
#define cusparse_get_svbuffsize cusparseDcsrsv2_bufferSize
#define cusparse_analysis       cusparseDcsrsv2_analysis
#define cusparse_solve          cusparseDcsrsv2_solve
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_LT(11,0,0) */
#define cusparse_create_analysis_info  cusparseCreateSolveAnalysisInfo
#define cusparse_destroy_analysis_info cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)              cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)               cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m)       cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseCcsrmm((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(j),(k),(cuComplex*)(l),(m),(cuComplex*)(n),(cuComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)          cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)                 cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)              cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                      cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)              cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)               cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m)       cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr_spmm(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) cusparseZcsrmm((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(j),(k),(cuDoubleComplex*)(l),(m),(cuDoubleComplex*)(n),(cuDoubleComplex*)(o),(p))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)          cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)                 cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)              cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                      cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
#endif
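
/* Usage sketch (illustrative, hypothetical variables throughout): the macros above
   let the triangular-solve code be written once for all four scalar types. In the
   CUDA >= 11 (csrsv2) branch the typical call sequence is

     csrsv2Info_t     info;
     cusparseStatus_t stat;
     int              bufferSize;
     stat = cusparse_create_analysis_info(&info);CHKERRCUSPARSE(stat);
     stat = cusparse_get_svbuffsize(handle,op,m,nnz,descr,vals,rowptr,colind,info,&bufferSize);CHKERRCUSPARSE(stat);
     // ... cudaMalloc a buffer of bufferSize bytes ...
     stat = cusparse_analysis(handle,op,m,nnz,descr,vals,rowptr,colind,info,policy,buffer);CHKERRCUSPARSE(stat);
     stat = cusparse_solve(handle,op,m,nnz,&alpha,descr,vals,rowptr,colind,info,x,y,policy,buffer);CHKERRCUSPARSE(stat);
*/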

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY   thrust::device_vector<PetscInt>
#define THRUSTARRAY      thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
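
/* Usage sketch (illustrative, not part of the original header): a CsrMatrix is
   typically populated by copying host CSR arrays to the device; ai, aj, aa are
   hypothetical host arrays of a sequential AIJ matrix with m rows, n columns and
   nnz nonzeros:

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nnz;
     csr->row_offsets    = new THRUSTINTARRAY32(m+1);
     csr->column_indices = new THRUSTINTARRAY32(nnz);
     csr->values         = new THRUSTARRAY(nnz);
     csr->row_offsets->assign(ai,ai+m+1);    // host -> device copies
     csr->column_indices->assign(aj,aj+nnz);
     csr->values->assign(aa,aa+nnz);
*/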

/* This struct holds the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t    descr;
  cusparseOperation_t   solveOp;
  CsrMatrix             *csrMat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  csrsv2Info_t          solveInfo;
  cusparseSolvePolicy_t solvePolicy;       /* whether level information is generated and used */
  int                   solveBufferSize;
  void                  *solveBuffer;
  size_t                csr2cscBufferSize; /* to transpose the triangular factor */
  void                  *csr2cscBuffer;
  /* initializers listed in declaration order to avoid -Wreorder warnings */
  Mat_SeqAIJCUSPARSETriFactorStruct() : csrMat(NULL),solvePolicy(CUSPARSE_SOLVE_POLICY_NO_LEVEL),solveBuffer(NULL),csr2cscBuffer(NULL) {} /* TODO: allow options database for policy */
#else
  cusparseSolveAnalysisInfo_t solveInfo;
#endif
};

/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};
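
/* Usage sketch (illustrative, assumes <thrust/iterator/permutation_iterator.h>):
   rpermIndices/cpermIndices hold the factorization permutations; a solve can gather
   a device vector through them, e.g. permuting xGPU (a hypothetical
   thrust::device_ptr<PetscScalar>) into the work vector of a
   Mat_SeqAIJCUSPARSETriFactors *fs:

     thrust::copy(thrust::make_permutation_iterator(xGPU,fs->rpermIndices->begin()),
                  thrust::make_permutation_iterator(xGPU,fs->rpermIndices->end()),
                  fs->workVector->begin());
*/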

struct Mat_CusparseSpMV {
  PetscBool            initialized;    /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t               spmvBufferSize; /* since it is not clear whether spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void                 *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0) /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr,vecYDescr; /* descriptors for the dense vectors in y=op(A)x */
#endif
};

/* This struct holds the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha_one;    /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  cusparseSpMatDescr_t matDescr;    /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV     cuSpMV[3];   /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL) {
    for (int i=0; i<3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
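
/* Usage sketch (illustrative; matstruct, xarray and yarray are hypothetical):
   because alpha_one/beta_zero/beta_one live in device memory, a multiply selects
   device pointer mode before handing them to cuSPARSE; pre-CUDA-11 CSR branch shown:

     cusparseStatus_t stat;
     stat = cusparseSetPointerMode(handle,CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
     CsrMatrix *csr = (CsrMatrix*)matstruct->mat;
     stat = cusparse_csr_spmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,
                              csr->num_rows,csr->num_cols,csr->num_entries,
                              matstruct->alpha_one,matstruct->descr,
                              csr->values->data().get(),
                              csr->row_offsets->data().get(),
                              csr->column_indices->data().get(),
                              xarray,matstruct->beta_zero,yarray);CHKERRCUSPARSE(stat);
*/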

/* This is a larger struct holding all the matrices for an SpMV and an SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt                     nrows;           /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat     format;          /* the storage format for the matrix on the device */
  cudaStream_t                 stream;          /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;          /* a handle to the cusparse library ... this may not be owned (if we're working in parallel, i.e. multiple GPUs) */
  PetscObjectState             nonzerostate;    /* track nonzero state to possibly recreate the GPU matrix */
  PetscBool                    transgen;        /* whether or not to generate an explicit transpose for MatMultTranspose operations */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
  size_t                       csr2cscBufferSize; /* stuff used to compute the matTranspose above */
  void                         *csr2cscBuffer;    /* This is used as a C struct and is calloc'ed by PetscNewLog() */
  cusparseCsr2CscAlg_t         csr2cscAlg;        /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t            spmvAlg;
  cusparseSpMMAlg_t            spmmAlg;
#endif
  PetscSplitCSRDataStructure   *deviceMat;      /* matrix on device for, e.g., assembly */
};
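
/* Usage sketch (illustrative): this struct is the GPU-specific payload of a
   MATSEQAIJCUSPARSE matrix, conventionally reached through the Mat's spptr:

     Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
     if (cusp->format == MAT_CUSPARSE_CSR) { ... }
*/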

PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
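
/* Usage sketch (illustrative): these internal helpers let other PETSc code share a
   stream and cusparse handle with the matrix, e.g. when several matrices cooperate
   on one GPU:

     PetscErrorCode ierr;
     ierr = MatCUSPARSESetStream(A,stream);CHKERRQ(ierr);
     ierr = MatCUSPARSESetHandle(A,handle);CHKERRQ(ierr);
     ierr = MatCUSPARSECopyToGPU(A);CHKERRQ(ierr);
*/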
#endif /* __CUSPARSEMATIMPL */