Actual source code: mpiaijviennacl.cxx
petsc-3.6.1 2015-08-06
#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>   /*I "petscmat.h" I*/
#include <../src/mat/impls/aij/seq/seqviennacl/viennaclmatimpl.h>

PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJViennaCL(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;

  PetscLayoutSetUp(B->rmap);
  PetscLayoutSetUp(B->cmap);
  if (!B->preallocated) {
    /* Explicitly create the two MATSEQAIJVIENNACL matrices:
       b->A holds the diagonal block, b->B the off-diagonal block. */
    MatCreate(PETSC_COMM_SELF,&b->A);
    MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
    MatSetType(b->A,MATSEQAIJVIENNACL);
    PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);
    MatCreate(PETSC_COMM_SELF,&b->B);
    MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
    MatSetType(b->B,MATSEQAIJVIENNACL);
    PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);
  }
  MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);
  MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);
  B->preallocated = PETSC_TRUE;
  return(0);
}

/* Create right/left work vectors of type VECVIENNACL that match the matrix layouts. */
PetscErrorCode MatCreateVecs_MPIAIJViennaCL(Mat mat,Vec *right,Vec *left)
{
  PetscInt rbs,cbs;

  MatGetBlockSizes(mat,&rbs,&cbs);
  if (right) {
    VecCreate(PetscObjectComm((PetscObject)mat),right);
    VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);
    VecSetBlockSize(*right,cbs);
    VecSetType(*right,VECVIENNACL);
    VecSetLayout(*right,mat->cmap);
  }
  if (left) {
    VecCreate(PetscObjectComm((PetscObject)mat),left);
    VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);
    VecSetBlockSize(*left,rbs);
    VecSetType(*left,VECVIENNACL);
    VecSetLayout(*left,mat->rmap);
  }
  return(0);
}

PetscErrorCode MatDestroy_MPIAIJViennaCL(Mat A)
{
  MatDestroy_MPIAIJ(A);
  return(0);
}

/* Build an ordinary MPIAIJ matrix, then override the preallocation and
   vector-creation hooks so the sequential blocks and work vectors use ViennaCL. */
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJViennaCL(Mat A)
{
  MatCreate_MPIAIJ(A);
  PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJViennaCL);
  A->ops->getvecs = MatCreateVecs_MPIAIJViennaCL;

  PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJVIENNACL);
  return(0);
}

/*@
   MatCreateAIJViennaCL - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to GPUs and use the ViennaCL library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameters d_nz and o_nz (or the arrays d_nnz and o_nnz). By setting these
   parameters accurately, performance during matrix assembly can be increased
   substantially.

   Collective on MPI_Comm

   Input Parameters:
+  comm  - MPI communicator
.  m     - number of local rows (or PETSC_DECIDE to have it calculated)
.  n     - number of local columns (or PETSC_DECIDE to have it calculated)
.  M     - number of global rows (or PETSC_DETERMINE to have it calculated)
.  N     - number of global columns (or PETSC_DETERMINE to have it calculated)
.  d_nz  - number of nonzeros per row in the diagonal portion of the local submatrix (same for all local rows)
.  d_nnz - array containing the number of nonzeros per row in the diagonal portion of the local submatrix
           (possibly different for each row), or NULL
.  o_nz  - number of nonzeros per row in the off-diagonal portion of the local submatrix (same for all local rows)
-  o_nnz - array containing the number of nonzeros per row in the off-diagonal portion of the local submatrix
           (possibly different for each row), or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]

   Notes:
   If d_nnz (o_nnz) is given then d_nz (o_nz) is ignored.

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage) is fully compatible with standard Fortran 77
   storage. That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero. See the users' manual for details.

   Specify the preallocated storage with either d_nz/o_nz or d_nnz/o_nnz (not both).
   Set d_nz=PETSC_DEFAULT, o_nz=PETSC_DEFAULT and d_nnz=o_nnz=NULL for PETSc to control
   dynamic memory allocation. For large problems you MUST preallocate memory or you
   will get TERRIBLE performance; see the users' manual chapter on matrices.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatCreateAIJCUSP(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MATMPIAIJVIENNACL, MATAIJVIENNACL
@*/

PetscErrorCode MatCreateAIJViennaCL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscMPIInt size;

  MatCreate(comm,A);
  MatSetSizes(*A,m,n,M,N);
  MPI_Comm_size(comm,&size);
  if (size > 1) {
    MatSetType(*A,MATMPIAIJVIENNACL);
    MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
  } else {
    MatSetType(*A,MATSEQAIJVIENNACL);
    MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
  }
  return(0);
}

/*M
   MATAIJVIENNACL - MATMPIAIJVIENNACL = "aijviennacl" = "mpiaijviennacl" - A matrix type to be used for sparse matrices.

   A matrix type (CSR format) whose data resides on GPUs.
   All matrix calculations are performed using the ViennaCL library.

   This matrix type is identical to MATSEQAIJVIENNACL when constructed with a single process communicator,
   and MATMPIAIJVIENNACL otherwise. As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
.  -mat_type mpiaijviennacl - sets the matrix type to "mpiaijviennacl" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateAIJViennaCL(), MATSEQAIJVIENNACL, MatCreateSeqAIJVIENNACL()
M*/
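
/* A hedged sketch of the option-driven usage described in the manual page above:
   create the matrix with MatCreate()/MatSetFromOptions() so -mat_type mpiaijviennacl
   selects this type at run time, and call both preallocation routines as recommended.
   This helper is not part of the original file; its name and the nonzero counts
   5 and 2 are illustrative assumptions only. */

#include <petscmat.h>

PetscErrorCode CreateOptionDrivenAIJ(MPI_Comm comm,PetscInt M,PetscInt N,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);   /* honors -mat_type ... from the options database */
  /* Call both preallocation routines; whichever does not apply to the chosen type is ignored. */
  ierr = MatSeqAIJSetPreallocation(*A,5,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*A,5,NULL,2,NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Typical invocation: run the program with, e.g.,
     mpiexec -n 2 ./app -mat_type mpiaijviennacl
   so that MatSetFromOptions() picks the ViennaCL AIJ implementation. */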