#include <../src/mat/impls/aij/mpi/mpiaij.h>

/*@C
   MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local
   portions are stored as SEQAIJPERM matrices (a matrix class that inherits
   from SEQAIJ but includes some optimizations to allow more effective
   vectorization). The same guidelines that apply to MPIAIJ matrices for
   preallocating the matrix storage apply here as well.
   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
       This value should be the same as the local size used in creating the
       y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
       This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  d_nz - number of nonzeros per row in the DIAGONAL portion of the local submatrix
          (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or NULL, if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e., 'm'.
           For matrices you plan to factor you must leave room for the diagonal entry
           and put in the entry even if it is zero.
.  o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of the local
          submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or NULL, if o_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e., 'm'.
   Output Parameter:
.  A - the matrix
   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored.

   The m, n, M, and N parameters specify the size of the matrix and its
   partitioning across processors, while d_nz, d_nnz, o_nz, and o_nnz specify
   the approximate storage requirements for this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).
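   For example (a sketch; the sizes and nonzero estimates are illustrative),
   either of the following calls is valid, provided the choice is consistent
   across all processes in the communicator:
.vb
   MatCreateMPIAIJPERM(comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,d_nz,NULL,o_nz,NULL,&A);
   MatCreateMPIAIJPERM(comm,PETSC_DECIDE,PETSC_DECIDE,M,N,d_nz,NULL,o_nz,NULL,&A);
.ve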
   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2, ... are the values of the input
   parameter 'm' on each process.
   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix obtained by extracting the part corresponding to the rows
   r1-r2 and columns r1-r2 of the global matrix, where r1 is the first row
   that belongs to the processor and r2 is the last row belonging to this
   processor. This is a square m x m matrix. The remaining portion of the
   local submatrix, of size m x (N-m), constitutes the OFF-DIAGONAL portion.
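   As an illustration (the matrix entries here are invented for this sketch),
   consider a 4 x 4 matrix split across two processes, each owning two rows:
.vb
            1  2 |  0  3
    Proc0   0  4 |  5  0
    ---------------------
    Proc1   6  0 |  7  8
            0  9 |  0 10
.ve
   On Proc0 the DIAGONAL portion is the 2 x 2 block of columns 0-1, giving
   d_nnz = [2,1] and o_nnz = [1,1]; on Proc1 the DIAGONAL portion is the block
   of columns 2-3, giving d_nnz = [2,1] and o_nnz = [1,1].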
   If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.
   When calling this routine with a single process communicator, a matrix of
   type SEQAIJPERM is returned. If a matrix of type MPIAIJPERM is desired
   for this type of communicator, use the construction mechanism
.vb
   MatCreate(...,&A); MatSetType(A,MATMPIAIJPERM); MatMPIAIJSetPreallocation(A,...);
.ve
   By default, this format uses inodes (identical nodes) when possible.
   We search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.
   Options Database Keys:
+  -mat_no_inode - Do not use inodes
-  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJPERM(), MatSetValues()
@*/
PetscErrorCode MatCreateMPIAIJPERM(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    /* Multiple processes: use the parallel AIJPERM format */
    ierr = MatSetType(*A,MATMPIAIJPERM);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    /* Single process: fall back to the sequential AIJPERM format */
    ierr = MatSetType(*A,MATSEQAIJPERM);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
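/* Illustrative usage sketch (hypothetical; guarded out of the build). It creates
   an AIJPERM matrix with rough preallocation estimates: the global size of 100
   and the per-row estimates of 5 (diagonal) and 2 (off-diagonal) nonzeros are
   assumptions for demonstration only. */
#if defined(PETSC_AIJPERM_USAGE_EXAMPLE) /* hypothetical guard macro */
static PetscErrorCode ExampleCreateAIJPERM(void)
{
  Mat            A;
  PetscErrorCode ierr;

  /* Let PETSc choose the local row/column split for the 100 x 100 matrix */
  ierr = MatCreateMPIAIJPERM(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,5,NULL,2,NULL,&A);CHKERRQ(ierr);
  /* ... set values, assemble, and use the matrix ... */
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  return 0;
}
#endif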
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJPERM(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJSetPreallocation_MPIAIJ(B,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  /* Convert the local diagonal and off-diagonal SeqAIJ blocks to SEQAIJPERM in place */
  ierr = MatConvert_SeqAIJ_SeqAIJPERM(b->A,MATSEQAIJPERM,MAT_INPLACE_MATRIX,&b->A);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJPERM(b->B,MATSEQAIJPERM,MAT_INPLACE_MATRIX,&b->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  Mat            B = *newmat;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }

  /* Only the type name and the preallocation hook change here; the local
     blocks are converted when preallocation is performed */
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJPERM);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJPERM);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJPERM(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Build a standard MPIAIJ matrix, then convert it in place to MPIAIJPERM */
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatConvert_MPIAIJ_MPIAIJPERM(A,MATMPIAIJPERM,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*MC
   MATAIJPERM - MATAIJPERM = "aijperm" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJPERM when constructed with a single process communicator,
   and MATMPIAIJPERM otherwise. As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity, as sketched below.
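   A minimal sketch of that pattern (the preallocation arguments are
   illustrative; on a given communicator only the matching call takes effect):
.vb
   MatCreate(comm,&A);
   MatSetSizes(A,m,n,M,N);
   MatSetType(A,MATAIJPERM);
   MatSeqAIJSetPreallocation(A,d_nz,d_nnz);
   MatMPIAIJSetPreallocation(A,d_nz,d_nnz,o_nz,o_nnz);
.ve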
   Options Database Keys:
.  -mat_type aijperm - sets the matrix type to "aijperm" during a call to MatSetFromOptions()
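   For example, a code that already calls MatSetFromOptions() can select this
   format at run time (the program name 'myapp' is hypothetical):
.vb
   ./myapp -mat_type aijperm
.ve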
   Level: beginner

.seealso: MatCreateMPIAIJPERM(), MATSEQAIJPERM, MATMPIAIJPERM
M*/