Actual source code: mpiaijmkl.c

petsc-3.8.4 2018-03-24
#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
   MatCreateMPIAIJMKL - Creates a sparse parallel matrix whose local
   portions are stored as SEQAIJMKL matrices (a matrix class that inherits
   from SEQAIJ but uses some operations provided by Intel MKL).  The same
   guidelines that apply to MPIAIJ matrices for preallocating the matrix
   storage apply here as well.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
       This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. For square matrices n is
       almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (the same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row),
           or NULL if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. 'm'.
           For matrices you plan to factor you must leave room for the diagonal entry
           and put in the entry even if it is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
           submatrix (the same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row), or NULL if o_nz is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e. 'm'.

   Output Parameter:
.  A - the matrix

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored.

   The m,n,M,N parameters specify the size of the matrix and its partitioning
   across processors, while the d_nz,d_nnz,o_nz,o_nnz parameters specify the
   approximate storage requirements for this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0,m1,m2,... are the input parameter 'm' on each
   process.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix obtained by extracting the part corresponding
   to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
   first row that belongs to the processor and r2 is the last row belonging
   to this processor. This is a square m x m matrix. The remaining portion
   of the local submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

   If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

   When calling this routine with a single process communicator, a matrix of
   type SEQAIJMKL is returned.  If a matrix of type MPIAIJMKL is desired
   for this type of communicator, use the construction mechanism:
     MatCreate(...,&A); MatSetType(A,MATMPIAIJMKL); MatMPIAIJSetPreallocation(A,...);

   Options Database Keys:
.  -mat_aijmkl_no_spmv2 - disables use of the SpMV2 inspector-executor routines

   Level: intermediate

.keywords: matrix, MKL, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJMKL(), MatSetValues()
@*/
PetscErrorCode  MatCreateMPIAIJMKL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIAIJMKL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQAIJMKL);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
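A usage sketch follows (not part of the original source): it creates a matrix through the routine above, under the assumption of 5 local rows and columns and guessed per-row nonzero counts; the helper name ExampleCreateMPIAIJMKL and all sizes are illustrative only.

  /* Sketch: create an AIJMKL matrix with 5 local rows/columns, assuming at
     most 3 nonzeros per row in the diagonal block and 2 in the off-diagonal
     block; every value here is a made-up illustration. */
  static PetscErrorCode ExampleCreateMPIAIJMKL(void)
  {
    Mat            A;
    PetscErrorCode ierr;

    PetscFunctionBegin;
    ierr = MatCreateMPIAIJMKL(PETSC_COMM_WORLD,5,5,PETSC_DETERMINE,PETSC_DETERMINE,3,NULL,2,NULL,&A);CHKERRQ(ierr);
    /* ... insert entries with MatSetValues(), then assemble ... */
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }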

PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJMKL(Mat,MatType,MatReuse,Mat*);

PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJMKL(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJSetPreallocation_MPIAIJ(B,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  /* Convert the local diagonal (b->A) and off-diagonal (b->B) blocks to SEQAIJMKL. */
  ierr = MatConvert_SeqAIJ_SeqAIJMKL(b->A,MATSEQAIJMKL,MAT_INPLACE_MATRIX,&b->A);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJMKL(b->B,MATSEQAIJMKL,MAT_INPLACE_MATRIX,&b->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }

  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJMKL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJMKL);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJMKL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatConvert_MPIAIJ_MPIAIJMKL(A,MATMPIAIJMKL,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
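The constructor above is what the MatCreate()/MatSetType() mechanism mentioned in the Notes of MatCreateMPIAIJMKL() dispatches to. A hedged sketch of that path follows; the helper name ExampleSetTypeMPIAIJMKL, the 100x100 global size, and the nonzero counts are illustrative assumptions, not part of the original source.

  /* Sketch: build an MPIAIJMKL matrix through the generic construction
     mechanism instead of the MatCreateMPIAIJMKL() convenience routine. */
  static PetscErrorCode ExampleSetTypeMPIAIJMKL(MPI_Comm comm)
  {
    Mat            A;
    PetscErrorCode ierr;

    PetscFunctionBegin;
    ierr = MatCreate(comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJMKL);CHKERRQ(ierr);  /* forces MPIAIJMKL even on one process */
    ierr = MatMPIAIJSetPreallocation(A,3,NULL,2,NULL);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }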

/*MC
   MATAIJMKL - MATAIJMKL = "aijmkl" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJMKL when constructed with a single process communicator,
   and MATMPIAIJMKL otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity; a sketch follows this block.

   Options Database Keys:
. -mat_type aijmkl - sets the matrix type to "aijmkl" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateMPIAIJMKL(), MATSEQAIJMKL, MATMPIAIJMKL
M*/
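As recommended above, calling both preallocation routines lets one code path handle any communicator size, since the preallocation call that does not match the resulting type is ignored. A sketch under those assumptions; the helper name ExamplePreallocateAIJMKL and the nonzero counts are illustrative, not part of the original source.

  /* Sketch: MATAIJMKL resolves to SEQAIJMKL or MPIAIJMKL depending on the
     communicator size, so both preallocation routines are called and the
     irrelevant one is a no-op. Counts (5 and 2) are made-up illustrations. */
  static PetscErrorCode ExamplePreallocateAIJMKL(MPI_Comm comm,PetscInt m,PetscInt n,Mat *A)
  {
    PetscErrorCode ierr;

    PetscFunctionBegin;
    ierr = MatCreate(comm,A);CHKERRQ(ierr);
    ierr = MatSetSizes(*A,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = MatSetType(*A,MATAIJMKL);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,5,NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,5,NULL,2,NULL);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }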