Actual source code: cusparsematimpl.h

petsc-3.10.5 2019-03-28

#if !defined(__CUSPARSEMATIMPL)
#define __CUSPARSEMATIMPL

#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>

#if CUDA_VERSION >= 4020
#include <cusparse_v2.h>
#else
#include <cusparse.h>
#endif

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
cuFloatComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
cuFloatComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
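
/* Illustration (not part of the original header): a sketch of how the
   precision-dispatch macros above are used.  A single call site compiles to
   cusparseScsrmv/cusparseDcsrmv/cusparseCcsrmv/cusparseZcsrmv depending on the
   PETSc scalar type, and in the complex cases the macro inserts the
   PetscScalar -> cuComplex/cuDoubleComplex casts that the cuSPARSE API expects.
   The names handle, descr, m, n, nnz, a, ia, ja, x, y below are hypothetical;
   passing host pointers to PETSC_CUSPARSE_ONE/ZERO assumes the handle is in
   CUSPARSE_POINTER_MODE_HOST. */
#if 0
/* y = 1.0*A*x + 0.0*y for a CSR matrix (a, ia, ja) stored on the device */
stat = cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                         m, n, nnz, &PETSC_CUSPARSE_ONE, descr,
                         a, ia, ja, x, &PETSC_CUSPARSE_ZERO, y);
#endif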

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};

//#define CUSPMATRIXCSR32 cusp::csr_matrix<int,PetscScalar,cusp::device_memory>
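
/* Illustration (not part of the original header): a minimal sketch of filling
   a CsrMatrix from host CSR arrays; ai (row offsets, length m+1), aj (column
   indices) and aa (values) are hypothetical names.  The thrust assign() calls
   perform the host-to-device copies. */
#if 0
CsrMatrix *csr      = new CsrMatrix;
csr->num_rows       = m;
csr->num_cols       = n;
csr->num_entries    = nnz;
csr->row_offsets    = new THRUSTINTARRAY32(m+1);
csr->column_indices = new THRUSTINTARRAY32(nnz);
csr->values         = new THRUSTARRAY(nnz);
csr->row_offsets->assign(ai, ai+m+1);
csr->column_indices->assign(aj, aj+nnz);
csr->values->assign(aa, aa+nnz);
#endif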

/* This struct holds the data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};

/* This struct holds the data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix; either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;        /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
};
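
/* Illustration (not part of the original header): alpha/beta_zero/beta_one are
   device-resident scalars, so that SpMV coefficients can be read from GPU
   memory (cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_DEVICE)).  A
   sketch of initializing them for a Mat_SeqAIJCUSPARSEMultStruct *matstruct
   (hypothetical name), reusing the constants defined above; in complex builds
   the cuComplex types are layout-compatible with PetscScalar. */
#if 0
cudaMalloc((void**)&matstruct->alpha,     sizeof(PetscScalar));
cudaMalloc((void**)&matstruct->beta_zero, sizeof(PetscScalar));
cudaMalloc((void**)&matstruct->beta_one,  sizeof(PetscScalar));
cudaMemcpy(matstruct->alpha,     &PETSC_CUSPARSE_ONE,  sizeof(PetscScalar), cudaMemcpyHostToDevice);
cudaMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice);
cudaMemcpy(matstruct->beta_one,  &PETSC_CUSPARSE_ONE,  sizeof(PetscScalar), cudaMemcpyHostToDevice);
#endif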

/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};
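
/* Illustration (not part of the original header): the descr/solveInfo/solveOp
   members of each factor feed the legacy csrsv analysis/solve pair dispatched
   by the macros above.  A sketch of a lower-triangular solve with a factor
   loTri (hypothetical name) whose csrMat is populated; barray/xarray are
   device input/output vectors, and the handle is assumed to be in host
   pointer mode so &PETSC_CUSPARSE_ONE can be passed directly. */
#if 0
CsrMatrix *csr = loTri->csrMat;
cusparse_analysis(handle, loTri->solveOp, csr->num_rows, csr->num_entries,
                  loTri->descr, csr->values->data().get(),
                  csr->row_offsets->data().get(),
                  csr->column_indices->data().get(), loTri->solveInfo);
cusparse_solve(handle, loTri->solveOp, csr->num_rows, &PETSC_CUSPARSE_ONE,
               loTri->descr, csr->values->data().get(),
               csr->row_offsets->data().get(),
               csr->column_indices->data().get(), loTri->solveInfo,
               barray, xarray);
#endif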

/* This is a larger struct holding all the matrices for a SpMV and SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;          /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the transposed matrix on the GPU (useful for BiCG) */
  THRUSTARRAY                  *workVector;   /* pointer to a workvector to which we can copy the relevant indices of a vector we want to multiply */
  PetscInt                     nonzerorow;    /* number of nonzero rows ... used in the flop calculations */
  MatCUSPARSEStorageFormat     format;        /* the storage format for the matrix on the device */
  cudaStream_t                 stream;        /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;        /* a handle to the cusparse library ... this may not be owned (if we are working in parallel, i.e. multiple GPUs) */
  PetscObjectState             nonzerostate;
};

PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
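
/* Illustration (not part of the original header): a hedged sketch of how
   these internal helpers might be driven, e.g. so that several matrices
   (such as the diagonal and off-diagonal blocks of an MPIAIJCUSPARSE matrix)
   share one cuSPARSE handle and stream.  A is an assembled MATSEQAIJCUSPARSE
   matrix; all names besides the declared functions are hypothetical. */
#if 0
cusparseHandle_t sharedHandle;
cudaStream_t     sharedStream;
cusparseCreate(&sharedHandle);
cudaStreamCreate(&sharedStream);
ierr = MatCUSPARSESetHandle(A, sharedHandle);CHKERRQ(ierr);
ierr = MatCUSPARSESetStream(A, sharedStream);CHKERRQ(ierr);
ierr = MatCUSPARSECopyToGPU(A);CHKERRQ(ierr);
#endif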
#endif