#include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h>

#if CUDA_VERSION>=4020
#include <cusparse_v2.h>
#else
#include <cusparse.h>
#endif

#include <algorithm>
#include <vector>

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseCcsrsv_solve
#define cusparse_analysis cusparseCcsrsv_analysis
#define cusparse_csr_spmv cusparseCcsrmv
#define cusparse_csr2csc  cusparseCcsr2csc
#define cusparse_hyb_spmv cusparseChybmv
#define cusparse_csr2hyb  cusparseCcsr2hyb
#define cusparse_hyb2csr  cusparseChyb2csr
cuFloatComplex ALPHA = {1.0f, 0.0f};
cuFloatComplex BETA  = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseZcsrsv_solve
#define cusparse_analysis cusparseZcsrsv_analysis
#define cusparse_csr_spmv cusparseZcsrmv
#define cusparse_csr2csc  cusparseZcsr2csc
#define cusparse_hyb_spmv cusparseZhybmv
#define cusparse_csr2hyb  cusparseZcsr2hyb
#define cusparse_hyb2csr  cusparseZhyb2csr
cuDoubleComplex ALPHA = {1.0, 0.0};
cuDoubleComplex BETA  = {0.0, 0.0};
#endif
#else
PetscScalar ALPHA = 1.0;
PetscScalar BETA  = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif

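/* Illustrative sketch (not part of this header): the cusparse_* aliases above map the
   precision-independent names onto the legacy cuSPARSE routines for the configured
   PetscScalar.  In double real precision, for example, cusparse_csr_spmv resolves to
   cusparseDcsrmv(), which computes y = alpha*A*x + beta*y; with the host scalars
   declared above (ALPHA = 1, BETA = 0, and the default CUSPARSE_POINTER_MODE_HOST)
   this reduces to y = A*x:

     cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                       m, n, nnz, &ALPHA, descr,
                       csrValues, csrRowOffsets, csrColumnIndices,
                       xarray, &BETA, yarray);

   Here handle, descr, the csr* arrays, and xarray/yarray are assumed to be an
   initialized cusparse handle, a matrix descriptor, device CSR arrays, and device
   vectors, respectively. */
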
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};

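/* Illustrative sketch (not part of this header): CSR layout for the 3x3 matrix
     [1 0 2]
     [0 3 0]
     [4 5 6]
   would be num_rows = num_cols = 3, num_entries = 6, with the device arrays holding
     row_offsets    = {0, 2, 3, 6}         (length num_rows+1)
     column_indices = {0, 2, 1, 0, 1, 2}   (length num_entries)
     values         = {1, 2, 3, 4, 5, 6}   (length num_entries)
   The raw device pointers handed to cuSPARSE can be obtained from the thrust vectors
   with, e.g., csrMat->values->data().get(). */
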
//#define CUSPMATRIXCSR32 cusp::csr_matrix<int,PetscScalar,cusp::device_memory>

/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};

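/* Illustrative sketch (not part of this header): a triangular factor is used in two
   phases.  The sparsity pattern is analyzed once at factorization time, and every
   subsequent MatSolve reuses that analysis:

     cusparse_analysis(handle, fact->solveOp,
                       fact->csrMat->num_rows, fact->csrMat->num_entries,
                       fact->descr,
                       fact->csrMat->values->data().get(),
                       fact->csrMat->row_offsets->data().get(),
                       fact->csrMat->column_indices->data().get(),
                       fact->solveInfo);

     cusparse_solve(handle, fact->solveOp, fact->csrMat->num_rows,
                    &ALPHA, fact->descr,
                    fact->csrMat->values->data().get(),
                    fact->csrMat->row_offsets->data().get(),
                    fact->csrMat->column_indices->data().get(),
                    fact->solveInfo, barray, xarray);

   Here fact is a populated Mat_SeqAIJCUSPARSETriFactorStruct*, handle is a cusparse
   handle, barray/xarray are device right-hand-side/solution arrays, and solveInfo is
   assumed to have been created with cusparseCreateSolveAnalysisInfo(). */
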
/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;           /* opaque pointer to a matrix; this could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;          /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices;  /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;         /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta;          /* pointer to a device "scalar" storing the beta parameter in the SpMV */
};

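/* Illustrative sketch (not part of this header): the opaque mat pointer is interpreted
   according to the storage format selected for the Mat, roughly

     if (format == MAT_CUSPARSE_CSR) {
       CsrMatrix *csr = (CsrMatrix*)multstruct->mat;
       cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                         csr->num_rows, csr->num_cols, csr->num_entries,
                         multstruct->alpha, multstruct->descr,
                         csr->values->data().get(),
                         csr->row_offsets->data().get(),
                         csr->column_indices->data().get(),
                         xarray, multstruct->beta, yarray);
     } else {
       cusparseHybMat_t hyb = (cusparseHybMat_t)multstruct->mat;
       cusparse_hyb_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                         multstruct->alpha, multstruct->descr, hyb,
                         xarray, multstruct->beta, yarray);
     }

   where the else branch covers the MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB formats.
   multstruct, xarray, and yarray are assumed to be a populated
   Mat_SeqAIJCUSPARSEMultStruct* and device input/output vectors; because alpha and
   beta live on the device, these calls require CUSPARSE_POINTER_MODE_DEVICE. */
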
/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any row reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any column reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};

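/* Illustrative sketch (not part of this header): a MatSolve with these factors roughly
   chains a gather, two triangular solves, and a scatter (barray/xarray are the device
   right-hand-side and solution arrays, tempGPU a second work array; error checking
   omitted):

     thrust::gather(fact->rpermIndices->begin(), fact->rpermIndices->end(),
                    thrust::device_pointer_cast(barray),
                    fact->workVector->begin());
     lower-triangular cusparse_solve with loTriFactorPtr : workVector -> tempGPU
     upper-triangular cusparse_solve with upTriFactorPtr : tempGPU   -> workVector
     thrust::scatter(fact->workVector->begin(), fact->workVector->end(),
                     fact->cpermIndices->begin(),
                     thrust::device_pointer_cast(xarray));

   The transpose variants loTriFactorPtrTranspose/upTriFactorPtrTranspose are built
   once (e.g. via cusparse_csr2csc) and reused for transpose solves, as needed by
   BiCG. */
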
/* This is a larger struct holding all the matrices for an SpMV and an SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;           /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;  /* pointer to the transposed matrix on the GPU (useful for BiCG) */
  THRUSTARRAY                  *workVector;    /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  PetscInt                     nonzerorow;     /* number of nonzero rows ... used in the flop calculations */
  MatCUSPARSEStorageFormat     format;         /* the storage format for the matrix on the device */
  cudaStream_t                 stream;         /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;         /* a handle to the cusparse library ... this may not be owned (if we are working in parallel, i.e. multiple GPUs) */
};

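/* Illustrative sketch (not part of this header): this struct is carried in A->spptr
   for a MATSEQAIJCUSPARSE matrix.  MatMult uses the mat member while
   MatMultTranspose uses matTranspose (building it on first use), e.g.

     Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
     Mat_SeqAIJCUSPARSEMultStruct *m              = cusparsestruct->mat;

   after MatCUSPARSECopyToGPU(A) has pushed the host AIJ data to the device. */
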
PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);

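/* Illustrative sketch (not part of this header): in an MPI (multi-GPU) setting a
   caller may make the sequential blocks of a parallel matrix share one library
   handle and per-block streams, roughly

     MatCUSPARSESetHandle(A_diag, handle);
     MatCUSPARSESetHandle(A_offdiag, handle);
     MatCUSPARSESetStream(A_diag, stream_diag);
     MatCUSPARSESetStream(A_offdiag, stream_offdiag);

   where A_diag/A_offdiag are hypothetical SeqAIJCUSPARSE blocks and handle/stream_*
   are a cusparseHandle_t and cudaStream_t created and owned by the caller (see the
   ownership notes in Mat_SeqAIJCUSPARSE above). */
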
#endif