Actual source code: mcrl.c


/*
  Defines a matrix-vector product for the MATMPIAIJCRL matrix class.
  This class is derived from the MATMPIAIJ class and retains the
  compressed row storage (aka Yale sparse matrix format) but augments
  it with a column-oriented storage that is more efficient for
  matrix-vector products on vector machines.

  CRL stands for constant row length; that is, the same number of columns
  is kept (padded with zeros) for each row of the sparse matrix.

  See src/mat/impls/aij/seq/crl/crl.c for the sequential version.
*/
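
/*
   Illustration (not part of the original source): with m = 3 local rows whose
   stored entries are
       row 0: a (col 0), b (col 2)
       row 1: c (col 1)
       row 2: d (col 0), e (col 1), f (col 2)
   the longest row has rmax = 3 entries, so every row is padded to 3 and the
   values are laid out slot by slot, acols[j*m+i] holding the j-th stored entry
   of row i:

       acols = { a, c, d,   b, 0, e,   0, 0, f }
       icols = { 0, 1, 0,   2, 1, 1,   2, 1, 2 }

   Padded slots carry a zero value and repeat the previous column index, so a
   matrix-vector product can sweep the slots with unit stride:

       for (j=0; j<rmax; j++)
         for (i=0; i<m; i++) y[i] += acols[j*m+i]*x[icols[j*m+i]];

   (y and x stand for the local output/input arrays; the real kernel is
   MatMult_AIJCRL() in src/mat/impls/aij/seq/crl/crl.c.)
*/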

#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <../src/mat/impls/aij/seq/crl/crl.h>

PetscErrorCode MatDestroy_MPIAIJCRL(Mat A)
{
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;

  /* Free everything in the Mat_AIJCRL data structure. */
  if (aijcrl) {
    PetscFree2(aijcrl->acols,aijcrl->icols);
    VecDestroy(&aijcrl->fwork);
    VecDestroy(&aijcrl->xwork);
    PetscFree(aijcrl->array);
  }
  PetscFree(A->spptr);

  PetscObjectChangeTypeName((PetscObject)A, MATMPIAIJ);
  MatDestroy_MPIAIJ(A);
  return(0);
}

PetscErrorCode MatMPIAIJCRL_create_aijcrl(Mat A)
{
  Mat_MPIAIJ     *a      = (Mat_MPIAIJ*)(A)->data;
  Mat_SeqAIJ     *Aij    = (Mat_SeqAIJ*)(a->A->data), *Bij = (Mat_SeqAIJ*)(a->B->data);
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt       m       = A->rmap->n; /* Number of local rows in the matrix. */
  PetscInt       nd      = a->A->cmap->n; /* Number of columns in the diagonal portion. */
  PetscInt       *aj     = Aij->j,*bj = Bij->j; /* Column indices from the CSR representation of the diagonal and off-diagonal blocks. */
  PetscInt       i, j,rmax = 0,*icols, *ailen = Aij->ilen, *bilen = Bij->ilen;
  PetscScalar    *aa = Aij->a,*ba = Bij->a,*acols,*array;

  /* determine the row with the most columns */
  for (i=0; i<m; i++) {
    rmax = PetscMax(rmax,ailen[i]+bilen[i]);
  }
  aijcrl->nz   = Aij->nz+Bij->nz;
  aijcrl->m    = A->rmap->n;
  aijcrl->rmax = rmax;

  PetscFree2(aijcrl->acols,aijcrl->icols);
  PetscMalloc2(rmax*m,&aijcrl->acols,rmax*m,&aijcrl->icols);
  acols = aijcrl->acols;
  icols = aijcrl->icols;
  for (i=0; i<m; i++) {
    for (j=0; j<ailen[i]; j++) {
      acols[j*m+i] = *aa++;
      icols[j*m+i] = *aj++;
    }
    for (; j<ailen[i]+bilen[i]; j++) {
      acols[j*m+i] = *ba++;
      icols[j*m+i] = nd + *bj++;
    }
    for (; j<rmax; j++) { /* empty column entries */
      acols[j*m+i] = 0.0;
      icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0;  /* handle case where row is EMPTY */
    }
  }
  PetscInfo1(A,"Percentage of 0's introduced for vectorized multiply %g\n",1.0-((double)(aijcrl->nz))/((double)(rmax*m)));

  PetscFree(aijcrl->array);
  PetscMalloc1(a->B->cmap->n+nd,&array);
  /* The array is a->B->cmap->n + nd entries long; xwork views only the first nd entries
     (the local part of x), while fwork views the remainder (the scattered ghost values). */
  VecDestroy(&aijcrl->xwork);
  VecCreateMPIWithArray(PetscObjectComm((PetscObject)A),1,nd,PETSC_DECIDE,array,&aijcrl->xwork);
  VecDestroy(&aijcrl->fwork);
  VecCreateSeqWithArray(PETSC_COMM_SELF,1,a->B->cmap->n,array+nd,&aijcrl->fwork);

  aijcrl->array = array;
  aijcrl->xscat = a->Mvctx;
  return(0);
}
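
/*
   Sketch (an illustration, not code from this file): the pieces built above
   combine in a multiply roughly as follows.  xwork views array[0..nd) and
   fwork views array[nd..nd+B->cmap->n), and off-diagonal column indices were
   stored shifted by nd, so once the local part of x and its ghost values are
   both sitting in array[] a single stride-1 sweep suffices:

       VecCopy(xx,aijcrl->xwork);                       (local entries of x -> array[0..nd))
       VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);
       VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);
       for (j=0; j<rmax; j++)
         for (i=0; i<m; i++) y[i] += acols[j*m+i]*array[icols[j*m+i]];

   The actual implementation is MatMult_AIJCRL() in the sequential crl.c.
*/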

PetscErrorCode MatAssemblyEnd_MPIAIJCRL(Mat A, MatAssemblyType mode)
{
  Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *Aij = (Mat_SeqAIJ*)(a->A->data), *Bij = (Mat_SeqAIJ*)(a->B->data);

  Aij->inode.use = PETSC_FALSE;
  Bij->inode.use = PETSC_FALSE;

  MatAssemblyEnd_MPIAIJ(A,mode);
  if (mode == MAT_FLUSH_ASSEMBLY) return(0);

  /* Now calculate the permutation and grouping information. */
  MatMPIAIJCRL_create_aijcrl(A);
  return(0);
}

extern PetscErrorCode MatMult_AIJCRL(Mat,Vec,Vec);
extern PetscErrorCode MatDuplicate_AIJCRL(Mat,MatDuplicateOption,Mat*);

/* MatConvert_MPIAIJ_MPIAIJCRL converts an MPIAIJ matrix into an
 * MPIAIJCRL matrix.  This routine is called by the MatCreate_MPIAIJCRL()
 * routine, but can also be used to convert an assembled MPIAIJ matrix
 * into an MPIAIJCRL one. */

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  Mat            B = *newmat;
  Mat_AIJCRL     *aijcrl;

  if (reuse == MAT_INITIAL_MATRIX) {
    MatDuplicate(A,MAT_COPY_VALUES,&B);
  }

  PetscNewLog(B,&aijcrl);
  B->spptr = (void*) aijcrl;

  /* Set function pointers for methods that we inherit from AIJ but override. */
  B->ops->duplicate   = MatDuplicate_AIJCRL;
  B->ops->assemblyend = MatAssemblyEnd_MPIAIJCRL;
  B->ops->destroy     = MatDestroy_MPIAIJCRL;
  B->ops->mult        = MatMult_AIJCRL;

  /* If A has already been assembled, compute the permutation. */
  if (A->assembled) {
    MatMPIAIJCRL_create_aijcrl(B);
  }
  PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJCRL);
  *newmat = B;
  return(0);
}
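
/*
   Usage sketch (illustrative, not from the original source): convert an
   assembled MPIAIJ matrix A into the CRL format, either as a new matrix B
   or in place:

       MatConvert(A,MATMPIAIJCRL,MAT_INITIAL_MATRIX,&B);
       MatConvert(A,MATMPIAIJCRL,MAT_INPLACE_MATRIX,&A);
*/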

/*@C
   MatCreateMPIAIJCRL - Creates a sparse matrix of type MPIAIJCRL.
   This type inherits from AIJ, but stores some additional
   information that is used to allow better vectorization of
   the matrix-vector product. At the cost of increased storage, the AIJ formatted
   matrix can be copied to a format in which pieces of the matrix are
   stored in ELLPACK format, allowing the vectorized matrix multiply
   routine to use stride-1 memory accesses.  As with the AIJ type, it is
   important to preallocate matrix storage in order to get good assembly
   performance.

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row in the diagonal portion (same for all rows)
.  nnz - array containing the number of nonzeros per row in the diagonal portion
         (possibly different for each row) or NULL
.  onz - number of nonzeros per row in the off-diagonal portion (same for all rows)
-  onnz - array containing the number of nonzeros per row in the off-diagonal portion
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   Notes:
   If nnz is given then nz is ignored; likewise, if onnz is given then onz is ignored.

   Level: intermediate

.seealso: MatCreate(), MatCreateMPIAIJPERM(), MatSetValues()
@*/
PetscErrorCode  MatCreateMPIAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],PetscInt onz,const PetscInt onnz[],Mat *A)
{

  MatCreate(comm,A);
  MatSetSizes(*A,m,n,m,n);
  MatSetType(*A,MATMPIAIJCRL);
  MatMPIAIJSetPreallocation_MPIAIJ(*A,nz,(PetscInt*)nnz,onz,(PetscInt*)onnz);
  return(0);
}
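
/*
   Usage sketch (the preallocation numbers 5 and 2 are illustrative, not from
   the original source): create a matrix with room for at most 5 nonzeros per
   row in the diagonal block and 2 in the off-diagonal block, then assemble
   and multiply as usual:

       Mat A;
       MatCreateMPIAIJCRL(PETSC_COMM_WORLD,m,n,5,NULL,2,NULL,&A);
       ... MatSetValues(A,...) ...
       MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
       MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
       MatMult(A,x,y);

   An existing AIJ code that calls MatSetFromOptions() can also select this
   format at runtime with -mat_type mpiaijcrl.
*/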

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCRL(Mat A)
{

  MatSetType(A,MATMPIAIJ);
  MatConvert_MPIAIJ_MPIAIJCRL(A,MATMPIAIJCRL,MAT_INPLACE_MATRIX,&A);
  return(0);
}