Actual source code: mpiaijviennacl.cxx

#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1

#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <../src/mat/impls/aij/seq/seqviennacl/viennaclmatimpl.h>

PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJViennaCL(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;

  PetscFunctionBegin;
  PetscCall(PetscLayoutSetUp(B->rmap));
  PetscCall(PetscLayoutSetUp(B->cmap));
  if (!B->preallocated) {
    /* Explicitly create the two MATSEQAIJVIENNACL matrices: b->A holds the
       diagonal block of the local rows, b->B the off-diagonal block. */
    PetscCall(MatCreate(PETSC_COMM_SELF,&b->A));
    PetscCall(MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n));
    PetscCall(MatSetType(b->A,MATSEQAIJVIENNACL));
    PetscCall(PetscLogObjectParent((PetscObject)B,(PetscObject)b->A));
    PetscCall(MatCreate(PETSC_COMM_SELF,&b->B));
    PetscCall(MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N));
    PetscCall(MatSetType(b->B,MATSEQAIJVIENNACL));
    PetscCall(PetscLogObjectParent((PetscObject)B,(PetscObject)b->B));
  }
  PetscCall(MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz));
  PetscCall(MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz));
  B->preallocated = PETSC_TRUE;
  PetscFunctionReturn(0);
}

PetscErrorCode MatAssemblyEnd_MPIAIJViennaCL(Mat A,MatAssemblyType mode)
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ*)A->data;
  PetscBool   v;

  PetscFunctionBegin;
  PetscCall(MatAssemblyEnd_MPIAIJ(A,mode));
  PetscCall(PetscObjectTypeCompare((PetscObject)b->lvec,VECSEQVIENNACL,&v));
  if (!v) {
    PetscInt m;
    /* Recreate the local work vector used for ghost values as a ViennaCL vector. */
    PetscCall(VecGetSize(b->lvec,&m));
    PetscCall(VecDestroy(&b->lvec));
    PetscCall(VecCreateSeqViennaCL(PETSC_COMM_SELF,m,&b->lvec));
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJViennaCL(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatDestroy_MPIAIJ(A));
  PetscFunctionReturn(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJViennaCL(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatCreate_MPIAIJ(A));
  A->boundtocpu = PETSC_FALSE;
  /* Make ViennaCL vectors the default for vectors obtained from this matrix. */
  PetscCall(PetscFree(A->defaultvectype));
  PetscCall(PetscStrallocpy(VECVIENNACL,&A->defaultvectype));
  PetscCall(PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJViennaCL));
  A->ops->assemblyend = MatAssemblyEnd_MPIAIJViennaCL;
  PetscCall(PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJVIENNACL));
  PetscFunctionReturn(0);
}


/*@C
   MatCreateAIJViennaCL - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format).  This matrix will ultimately be pushed down
   to GPUs and use the ViennaCL library for calculations.  For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameters d_nz and o_nz (or the arrays d_nnz and o_nnz).  By setting these
   parameters accurately, performance during matrix assembly can be increased
   substantially.

   Collective

   Input Parameters:
+  comm  - MPI communicator
.  m     - number of local rows (or PETSC_DECIDE to have calculated if M is given)
.  n     - number of local columns (or PETSC_DECIDE to have calculated if N is given)
.  M     - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N     - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the DIAGONAL
           portion of the local submatrix (possibly different for each row) or NULL
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local submatrix
           (same value is used for all local rows)
-  o_nnz - array containing the number of nonzeros in the various rows of the OFF-DIAGONAL
           portion of the local submatrix (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]

   Notes:
   If d_nnz (o_nnz) is given then d_nz (o_nz) is ignored.

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage) is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
   Set d_nz=PETSC_DEFAULT, o_nz=PETSC_DEFAULT, and d_nnz=o_nnz=NULL for PETSc to
   control dynamic memory allocation.  For large problems you MUST preallocate memory
   or you will get TERRIBLE performance, see the users' manual chapter on matrices.
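
   A minimal usage sketch (the global size and per-row fill values below are
   illustrative only):
.vb
   Mat A;
   MatCreateAIJViennaCL(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,5,NULL,2,NULL,&A);
   /* ... MatSetValues(), MatAssemblyBegin(), MatAssemblyEnd() ... */
   MatDestroy(&A);
.ve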

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatCreateAIJCUSPARSE(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJVIENNACL, MATAIJVIENNACL
@*/
PetscErrorCode MatCreateAIJViennaCL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm,A));
  PetscCall(MatSetSizes(*A,m,n,M,N));
  PetscCallMPI(MPI_Comm_size(comm,&size));
  if (size > 1) {
    PetscCall(MatSetType(*A,MATMPIAIJVIENNACL));
    PetscCall(MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz));
  } else {
    PetscCall(MatSetType(*A,MATSEQAIJVIENNACL));
    PetscCall(MatSeqAIJSetPreallocation(*A,d_nz,d_nnz));
  }
  PetscFunctionReturn(0);
}

/*MC
   MATAIJVIENNACL - MATMPIAIJVIENNACL = "aijviennacl" = "mpiaijviennacl" - A matrix type to be used for sparse matrices.

   A matrix type (CSR format) whose data resides on GPUs.
   All matrix calculations are performed using the ViennaCL library.

   This matrix type is identical to MATSEQAIJVIENNACL when constructed with a single-process communicator,
   and MATMPIAIJVIENNACL otherwise.  As a result, MatSeqAIJSetPreallocation() is supported for
   single-process communicators, and MatMPIAIJSetPreallocation() is supported for communicators
   controlling multiple processes.  It is recommended that you call both of the above
   preallocation routines for simplicity, as in the sketch below.

   Options Database Keys:
.  -mat_type mpiaijviennacl - sets the matrix type to "mpiaijviennacl" during a call to MatSetFromOptions()
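
   A minimal usage sketch (the matrix size and preallocation figures are illustrative only):
.vb
   Mat A;
   MatCreate(PETSC_COMM_WORLD,&A);
   MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
   MatSetFromOptions(A);                        /* picks up -mat_type mpiaijviennacl */
   MatSeqAIJSetPreallocation(A,5,NULL);         /* used on a single-process communicator */
   MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);  /* used on a multi-process communicator */
.ve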

   Level: beginner

.seealso: MatCreateAIJViennaCL(), MATSEQAIJVIENNACL, MatCreateSeqAIJVIENNACL()
M*/