Actual source code: mpiaijviennacl.cxx
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1

#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <../src/mat/impls/aij/seq/seqviennacl/viennaclmatimpl.h>

PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJViennaCL(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  PetscCall(PetscLayoutSetUp(B->rmap));
  PetscCall(PetscLayoutSetUp(B->cmap));
  if (!B->preallocated) {
    /* Explicitly create the two MATSEQAIJVIENNACL matrices: the diagonal block b->A and the off-diagonal block b->B. */
    PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
    PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
    PetscCall(MatSetType(b->A, MATSEQAIJVIENNACL));
    PetscCall(PetscLogObjectParent((PetscObject)B, (PetscObject)b->A));
    PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
    PetscCall(MatSetSizes(b->B, B->rmap->n, B->cmap->N, B->rmap->n, B->cmap->N));
    PetscCall(MatSetType(b->B, MATSEQAIJVIENNACL));
    PetscCall(PetscLogObjectParent((PetscObject)B, (PetscObject)b->B));
  }
  PetscCall(MatSeqAIJSetPreallocation(b->A, d_nz, d_nnz));
  PetscCall(MatSeqAIJSetPreallocation(b->B, o_nz, o_nnz));
  B->preallocated = PETSC_TRUE;
  PetscFunctionReturn(0);
}

PetscErrorCode MatAssemblyEnd_MPIAIJViennaCL(Mat A, MatAssemblyType mode)
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)A->data;
  PetscBool   v;

  PetscFunctionBegin;
  PetscCall(MatAssemblyEnd_MPIAIJ(A, mode));
  PetscCall(PetscObjectTypeCompare((PetscObject)b->lvec, VECSEQVIENNACL, &v));
  if (!v) {
    /* Replace the local (ghost-value) scatter vector with a ViennaCL vector of the same size. */
    PetscInt m;
    PetscCall(VecGetSize(b->lvec, &m));
    PetscCall(VecDestroy(&b->lvec));
    PetscCall(VecCreateSeqViennaCL(PETSC_COMM_SELF, m, &b->lvec));
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJViennaCL(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatDestroy_MPIAIJ(A));
  PetscFunctionReturn(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJViennaCL(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatCreate_MPIAIJ(A));
  A->boundtocpu = PETSC_FALSE;
  /* Make VECVIENNACL the default type for vectors created from this matrix. */
  PetscCall(PetscFree(A->defaultvectype));
  PetscCall(PetscStrallocpy(VECVIENNACL, &A->defaultvectype));
  PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJViennaCL));
  A->ops->assemblyend = MatAssemblyEnd_MPIAIJViennaCL;
  PetscCall(PetscObjectChangeTypeName((PetscObject)A, MATMPIAIJVIENNACL));
  PetscFunctionReturn(0);
}

/*@C
   MatCreateAIJViennaCL - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to GPUs and use the ViennaCL library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameters d_nz and o_nz (or the arrays d_nnz and o_nnz). By setting these
   parameters accurately, performance during matrix assembly can be increased
   substantially.

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated from M)
.  n - number of local columns (or PETSC_DECIDE to have it calculated from N)
.  M - number of global rows (or PETSC_DETERMINE to have it calculated from m)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated from n)
.  d_nz - number of nonzeros per row in the diagonal portion of the local submatrix (same for all local rows)
.  d_nnz - array containing the number of nonzeros per row in the diagonal portion of the local submatrix (possibly different for each row), or NULL
.  o_nz - number of nonzeros per row in the off-diagonal portion of the local submatrix (same for all local rows)
-  o_nnz - array containing the number of nonzeros per row in the off-diagonal portion of the local submatrix (possibly different for each row), or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of calling this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]

   Notes:
   If d_nnz (o_nnz) is given, then d_nz (o_nz) is ignored.

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage) is fully compatible with standard Fortran 77
   storage. That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero. See the users' manual for details.

   Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
   Set d_nz = o_nz = PETSC_DEFAULT and d_nnz = o_nnz = NULL for PETSc to control
   dynamic memory allocation. For large problems you MUST preallocate memory or you
   will get TERRIBLE performance; see the users' manual chapter on matrices.
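
   Example:
   A minimal usage sketch; the global size (100) and the per-row fill estimates
   (3 diagonal, 1 off-diagonal) are illustrative values, not taken from this file:
.vb
   Mat A;
   MatCreateAIJViennaCL(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, 100, 100, 3, NULL, 1, NULL, &A);
   /* ... MatSetValues(), MatAssemblyBegin(), MatAssemblyEnd() ... */
   MatDestroy(&A);
.ve
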
   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatCreateAIJCUSPARSE(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJVIENNACL, MATAIJVIENNACL
@*/
PetscErrorCode MatCreateAIJViennaCL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size > 1) {
    PetscCall(MatSetType(*A, MATMPIAIJVIENNACL));
    PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
  } else {
    PetscCall(MatSetType(*A, MATSEQAIJVIENNACL));
    PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
  }
  PetscFunctionReturn(0);
}

/*MC
   MATAIJVIENNACL - MATMPIAIJVIENNACL = "aijviennacl" = "mpiaijviennacl" - A matrix type to be used for sparse matrices.

   A matrix type (CSR format) whose data resides on GPUs.
   All matrix calculations are performed using the ViennaCL library.

   This matrix type is identical to MATSEQAIJVIENNACL when constructed with a single-process
   communicator, and to MATMPIAIJVIENNACL otherwise. Consequently, MatSeqAIJSetPreallocation()
   is supported on single-process communicators, and MatMPIAIJSetPreallocation() on
   communicators spanning multiple processes. For simplicity, it is recommended to call both
   preallocation routines, as in the sketch below.
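
   Example:
   A minimal sketch of the recommended paradigm; the matrix size and fill estimates are
   illustrative, not taken from this file:
.vb
   Mat A;
   MatCreate(PETSC_COMM_WORLD, &A);
   MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100);
   MatSetFromOptions(A);                           /* honors -mat_type mpiaijviennacl */
   MatSeqAIJSetPreallocation(A, 5, NULL);          /* used when running on one process */
   MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL); /* used when running on several processes */
.ve
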
   Options Database Keys:
.  -mat_type mpiaijviennacl - sets the matrix type to "mpiaijviennacl" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateAIJViennaCL(), MATSEQAIJVIENNACL, MatCreateSeqAIJViennaCL()
M*/