#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>

#if CUDA_VERSION>=4020
#include <cusparse_v2.h>
#else
#include <cusparse.h>
#endif

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
cuFloatComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
cuFloatComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
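
/* Illustrative sketch (not part of the original source): the macros above map the
   type-generic names onto the cusparse routine for the configured PetscScalar.  With
   PETSC_USE_REAL_DOUBLE and no complex scalars, a call such as

     stat = cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                              m, n, nnz, alpha, descr,
                              values, rowoffsets, colindices, x, beta, y);

   forwards directly to cusparseDcsrmv() with the same argument order, while the complex
   branches additionally cast the PetscScalar pointers to the matching cuComplex or
   cuDoubleComplex pointer type.  The names handle, m, n, nnz, alpha, descr, values,
   rowoffsets, colindices, x, beta, and y are hypothetical placeholders. */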
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt          num_rows;
  PetscInt          num_cols;
  PetscInt          num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
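
/* Illustrative sketch (hypothetical, not part of the original source): filling a CsrMatrix
   from host CSR arrays ai (row offsets), aj (column indices), and aa (values) of an m x n
   matrix with nnz nonzeros.  The device_vector range constructors copy the host data to
   the GPU; all variable names here are placeholders.

     CsrMatrix *mat      = new CsrMatrix;
     mat->num_rows       = m;
     mat->num_cols       = n;
     mat->num_entries    = nnz;
     mat->row_offsets    = new THRUSTINTARRAY32(ai, ai+m+1);
     mat->column_indices = new THRUSTINTARRAY32(aj, aj+nnz);
     mat->values         = new THRUSTARRAY(aa, aa+nnz);
*/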
//#define CUSPMATRIXCSR32 cusp::csr_matrix<int,PetscScalar,cusp::device_memory>
/* This struct holds the relevant data needed to perform a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};
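
/* Illustrative sketch (hypothetical): how the members above would be used to set up and then
   apply a lower-triangular solve via the cusparse_analysis/cusparse_solve macros defined
   earlier.  loTriFactor is a Mat_SeqAIJCUSPARSETriFactorStruct*; handle, x, and y are
   placeholder device pointers.

     CsrMatrix *csr = loTriFactor->csrMat;
     cusparse_analysis(handle, loTriFactor->solveOp,
                       csr->num_rows, csr->num_entries, loTriFactor->descr,
                       csr->values->data().get(),
                       csr->row_offsets->data().get(),
                       csr->column_indices->data().get(),
                       loTriFactor->solveInfo);                   // one-time analysis phase
     cusparse_solve(handle, loTriFactor->solveOp,
                    csr->num_rows, &PETSC_CUSPARSE_ONE, loTriFactor->descr,
                    csr->values->data().get(),
                    csr->row_offsets->data().get(),
                    csr->column_indices->data().get(),
                    loTriFactor->solveInfo, x, y);                // solve L y = x
*/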
/* This struct holds the relevant data needed to perform a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix: either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;        /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
};
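
/* Illustrative sketch (hypothetical): a CSR multiply y = A*x driven by this struct when mat
   holds a CsrMatrix.  Because alpha/beta_zero/beta_one point to device memory, the cusparse
   handle is assumed to be in CUSPARSE_POINTER_MODE_DEVICE.  matstruct, handle, xarray, and
   yarray are placeholders.

     CsrMatrix *csr = (CsrMatrix*)matstruct->mat;
     cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                       csr->num_rows, csr->num_cols, csr->num_entries,
                       matstruct->alpha, matstruct->descr,
                       csr->values->data().get(),
                       csr->row_offsets->data().get(),
                       csr->column_indices->data().get(),
                       xarray, matstruct->beta_zero, yarray);
*/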
/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* row indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* column indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... needed for accurate logging between ICC and ILU */
};
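
/* Illustrative sketch (hypothetical): one way the permutation indices and workVector could be
   applied around the two triangular solves, using thrust::gather (requires <thrust/gather.h>).
   bGPU and xGPU are placeholder device pointers to the right-hand side and solution.

     Mat_SeqAIJCUSPARSETriFactors *fs = ...;
     thrust::gather(fs->rpermIndices->begin(), fs->rpermIndices->end(),
                    bGPU, fs->workVector->begin());            // permute rhs: work = b[rperm]
     // ... lower then upper triangular solve applied to workVector ...
     thrust::gather(fs->cpermIndices->begin(), fs->cpermIndices->end(),
                    fs->workVector->begin(), xGPU);            // permute solution: x = work[cperm]
*/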
/* This is a larger struct holding all the matrices for an SpMV and SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;          /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the transposed matrix on the GPU (useful for BiCG) */
  THRUSTARRAY                  *workVector;   /* pointer to a work vector into which we can copy the relevant indices of a vector we want to multiply */
  PetscInt                     nonzerorow;    /* number of nonzero rows ... used in the flop calculations */
  MatCUSPARSEStorageFormat     format;        /* the storage format for the matrix on the device */
  cudaStream_t                 stream;        /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;        /* a handle to the cusparse library ... this may not be owned (if we are working in parallel, i.e. multiple GPUs) */
  PetscObjectState             nonzerostate;
};
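
/* Illustrative sketch (hypothetical): how the opaque mat pointer inside the
   Mat_SeqAIJCUSPARSEMultStruct would be interpreted according to the storage format selected
   here.  MAT_CUSPARSE_CSR is the MatCUSPARSEStorageFormat value for plain CSR storage;
   cusparsestruct and matstruct are placeholders.

     Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
     if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
       CsrMatrix *csr = (CsrMatrix*)matstruct->mat;              // CSR path: cusparse_csr_spmv
       // ...
     } else {
       cusparseHybMat_t hyb = (cusparseHybMat_t)matstruct->mat;  // ELL/HYB path: cusparse_hyb_spmv
       // ...
     }
*/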
PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
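
/* Illustrative sketch (hypothetical): since the stream and handle above may not be owned by a
   sequential matrix, these internal setters allow a caller (for example, code driving several
   sequential blocks in a parallel setting) to share one stream/handle, e.g.

     ierr = MatCUSPARSESetStream(A, stream);CHKERRQ(ierr);
     ierr = MatCUSPARSESetHandle(A, handle);CHKERRQ(ierr);

   where A, stream, and handle are placeholders. */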
#endif