/*
  Defines a matrix-vector product for the MATSEQAIJCRL matrix class.
  This class is derived from the MATSEQAIJ class and retains the
  compressed row storage (aka Yale sparse matrix format) but augments
  it with a column-oriented storage that is more efficient for
  matrix-vector products on vector machines.

  CRL stands for constant row length; that is, the same number of columns
  is kept (padded with zeros) for each row of the sparse matrix.
*/
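/*
  Illustrative sketch (not part of the original source; the values are made up):
  for a hypothetical 3-row matrix with rmax = 2, the CRL arrays are stored
  column by column, so entry j of row i lands at position j*m+i:

        | 1 0 2 |               acols = {1, 3, 4,  2, 0, 5}
    A = | 0 3 0 |   (m = 3) =>  icols = {0, 1, 0,  2, 1, 2}
        | 4 0 5 |

  Row 1 has only one nonzero, so its second slot is padded with a zero value
  and a repeated column index, giving every row the same length rmax; the
  multiply kernel can then sweep each group of m slots with stride-1 accesses.
*/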
#include <../src/mat/impls/aij/seq/crl/crl.h>
PetscErrorCode MatDestroy_SeqAIJCRL(Mat A)
{
  Mat_AIJCRL *aijcrl = (Mat_AIJCRL*) A->spptr;

  /* Free everything in the Mat_AIJCRL data structure. */
  if (aijcrl) {
    PetscFree2(aijcrl->acols,aijcrl->icols);
  }
  PetscFree(A->spptr);
  PetscObjectChangeTypeName((PetscObject)A,MATSEQAIJ);
  MatDestroy_SeqAIJ(A);
  return(0);
}
PetscErrorCode MatDuplicate_AIJCRL(Mat A, MatDuplicateOption op, Mat *M)
{
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot duplicate AIJCRL matrices yet");
  return(0);
}
PetscErrorCode MatSeqAIJCRL_create_aijcrl(Mat A)
{
  Mat_SeqAIJ  *a      = (Mat_SeqAIJ*)(A)->data;
  Mat_AIJCRL  *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt    m       = A->rmap->n;  /* Number of rows in the matrix. */
  PetscInt    *aj     = a->j;        /* Column indices from the CSR representation. */
  PetscInt    i, j, rmax = a->rmax, *icols, *ilen = a->ilen;
  MatScalar   *aa     = a->a;        /* Nonzero values from the CSR representation. */
  PetscScalar *acols;
  aijcrl->nz   = a->nz;
  aijcrl->m    = A->rmap->n;
  aijcrl->rmax = rmax;

  PetscFree2(aijcrl->acols,aijcrl->icols);
  PetscMalloc2(rmax*m,PetscScalar,&aijcrl->acols,rmax*m,PetscInt,&aijcrl->icols);
  acols = aijcrl->acols;
  icols = aijcrl->icols;
  for (i=0; i<m; i++) {
    for (j=0; j<ilen[i]; j++) {
      acols[j*m+i] = *aa++;
      icols[j*m+i] = *aj++;
    }
    for (; j<rmax; j++) { /* empty column entries */
      acols[j*m+i] = 0.0;
      icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0; /* handle case where row is EMPTY */
    }
  }
  PetscInfo2(A,"Percentage of 0's introduced for vectorized multiply %g. Rmax= %D\n",1.0-((double)a->nz)/((double)(rmax*m)),rmax);
  return(0);
}
extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
PetscErrorCode MatAssemblyEnd_SeqAIJCRL(Mat A, MatAssemblyType mode)
{
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;

  a->inode.use = PETSC_FALSE;

  MatAssemblyEnd_SeqAIJ(A,mode);
  if (mode == MAT_FLUSH_ASSEMBLY) return(0);

  /* Now calculate the permutation and grouping information. */
  MatSeqAIJCRL_create_aijcrl(A);
  return(0);
}
#include <../src/mat/impls/aij/seq/crl/ftn-kernels/fmultcrl.h>
/*
  Shared by both sequential and parallel versions of the CRL matrix: MATMPIAIJCRL and MATSEQAIJCRL
  - the scatter is used only in the parallel version
*/
PetscErrorCode MatMult_AIJCRL(Mat A,Vec xx,Vec yy)
{
  Mat_AIJCRL  *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt    m       = aijcrl->m;    /* Number of rows in the matrix. */
  PetscInt    rmax    = aijcrl->rmax,*icols = aijcrl->icols;
  PetscScalar *acols  = aijcrl->acols;
  PetscScalar *x,*y;
#if !defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
  PetscInt i,j,ii;
#endif
#if defined(PETSC_HAVE_PRAGMA_DISJOINT)
#pragma disjoint(*x,*y,*acols)
#endif
  if (aijcrl->xscat) {
    VecCopy(xx,aijcrl->xwork);
    /* get remote values needed for local part of multiply */
    VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);
    VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);
    xx = aijcrl->xwork;
  }

  VecGetArray(xx,&x);
  VecGetArray(yy,&y);

#if defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
  fortranmultcrl_(&m,&rmax,x,y,icols,acols);
#else

  /* first column */
  for (j=0; j<m; j++) y[j] = acols[j]*x[icols[j]];

  /* other columns */
#if defined(PETSC_HAVE_CRAY_VECTOR)
#pragma _CRI preferstream
#endif
  for (i=1; i<rmax; i++) {
    ii = i*m;
#if defined(PETSC_HAVE_CRAY_VECTOR)
#pragma _CRI prefervector
#endif
    for (j=0; j<m; j++) y[j] = y[j] + acols[ii+j]*x[icols[ii+j]];
  }
#if defined(PETSC_HAVE_CRAY_VECTOR)
#pragma _CRI ivdep
#endif

#endif
  PetscLogFlops(2.0*aijcrl->nz - m);
  VecRestoreArray(xx,&x);
  VecRestoreArray(yy,&y);
  return(0);
}
/* MatConvert_SeqAIJ_SeqAIJCRL converts a SeqAIJ matrix into a
 * SeqAIJCRL matrix. This routine is called by the MatCreate_SeqAIJCRL()
 * routine, but can also be used to convert an assembled SeqAIJ matrix
 * into a SeqAIJCRL one. */
PETSC_EXTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCRL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  Mat        B = *newmat;
  Mat_AIJCRL *aijcrl;

  if (reuse == MAT_INITIAL_MATRIX) {
    MatDuplicate(A,MAT_COPY_VALUES,&B);
  }

  PetscNewLog(B,Mat_AIJCRL,&aijcrl);
  B->spptr = (void*) aijcrl;

  /* Set function pointers for methods that we inherit from AIJ but override. */
  B->ops->duplicate   = MatDuplicate_AIJCRL;
  B->ops->assemblyend = MatAssemblyEnd_SeqAIJCRL;
  B->ops->destroy     = MatDestroy_SeqAIJCRL;
  B->ops->mult        = MatMult_AIJCRL;

  /* If A has already been assembled, compute the permutation. */
  if (A->assembled) {
    MatSeqAIJCRL_create_aijcrl(B);
  }
  PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCRL);
  *newmat = B;
  return(0);
}
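/*
  A minimal usage sketch (an assumption for illustration, not from the
  original source): create a new MATSEQAIJCRL copy B of an assembled
  MATSEQAIJ matrix A via the public MatConvert() interface, which
  dispatches to the routine above:

      Mat B;
      MatConvert(A,MATSEQAIJCRL,MAT_INITIAL_MATRIX,&B);
*/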
/*@C
   MatCreateSeqAIJCRL - Creates a sparse matrix of type SEQAIJCRL.
   This type inherits from AIJ, but stores some additional
   information that is used to allow better vectorization of
   the matrix-vector product. At the cost of increased storage, the AIJ formatted
   matrix can be copied to a format in which pieces of the matrix are
   stored in ELLPACK format, allowing the vectorized matrix multiply
   routine to use stride-1 memory accesses. As with the AIJ type, it is
   important to preallocate matrix storage in order to get good assembly
   performance.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   Notes:
   If nnz is given then nz is ignored

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateMPIAIJPERM(), MatSetValues()
@*/
PetscErrorCode MatCreateSeqAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  MatCreate(comm,A);
  MatSetSizes(*A,m,n,m,n);
  MatSetType(*A,MATSEQAIJCRL);
  MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,nnz);
  return(0);
}
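/*
  A minimal usage sketch (an assumption for illustration, not from the
  original source): create a 100x100 SEQAIJCRL matrix with at most 5
  nonzeros preallocated per row, then assemble it as usual:

      Mat A;
      MatCreateSeqAIJCRL(PETSC_COMM_SELF,100,100,5,NULL,&A);
      ... insert entries with MatSetValues() ...
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/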
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCRL(Mat A)
{
  MatSetType(A,MATSEQAIJ);
  MatConvert_SeqAIJ_SeqAIJCRL(A,MATSEQAIJCRL,MAT_REUSE_MATRIX,&A);
  return(0);
}