Actual source code: crl.c (petsc-3.11.4, 2019-09-28)

/*
  Defines a matrix-vector product for the MATSEQAIJCRL matrix class.
  This class is derived from the MATSEQAIJ class and retains the
  compressed row storage (aka Yale sparse matrix format) but augments
  it with a column-oriented storage that is more efficient for
  matrix-vector products on vector machines.

  CRL stands for constant row length; that is, the same number of columns
  is kept (padded with zeros) for each row of the sparse matrix.
*/
#include <../src/mat/impls/aij/seq/crl/crl.h>

PetscErrorCode MatDestroy_SeqAIJCRL(Mat A)
{
  PetscErrorCode ierr;
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;

  PetscFunctionBegin;
  /* Free everything in the Mat_AIJCRL data structure. */
  if (aijcrl) {
    ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
  }
  ierr = PetscFree(A->spptr);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A, MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_AIJCRL(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscFunctionBegin;
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot duplicate AIJCRL matrices yet");
  PetscFunctionReturn(0);
}

PetscErrorCode MatSeqAIJCRL_create_aijcrl(Mat A)
{
  Mat_SeqAIJ     *a      = (Mat_SeqAIJ*)(A)->data;
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt       m       = A->rmap->n; /* Number of rows in the matrix. */
  PetscInt       *aj     = a->j;       /* Column indices from the CSR representation. */
  PetscInt       i, j, rmax = a->rmax, *icols, *ilen = a->ilen;
  MatScalar      *aa = a->a;
  PetscScalar    *acols;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  aijcrl->nz   = a->nz;
  aijcrl->m    = A->rmap->n;
  aijcrl->rmax = rmax;

  ierr  = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
  ierr  = PetscMalloc2(rmax*m,&aijcrl->acols,rmax*m,&aijcrl->icols);CHKERRQ(ierr);
  acols = aijcrl->acols;
  icols = aijcrl->icols;
  for (i=0; i<m; i++) {
    for (j=0; j<ilen[i]; j++) {
      acols[j*m+i] = *aa++;
      icols[j*m+i] = *aj++;
    }
    for (; j<rmax; j++) { /* empty column entries */
      acols[j*m+i] = 0.0;
      icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0; /* handle case where row is EMPTY */
    }
  }
  ierr = PetscInfo2(A,"Percentage of 0's introduced for vectorized multiply %g. Rmax= %D\n",1.0-((double)a->nz)/((double)(rmax*m)),rmax);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
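
/*
  A worked example of the layout built above, for a small 3 x 3 matrix
  (illustrative only):

      [ 1 0 2 ]
      [ 0 3 0 ]
      [ 4 5 6 ]

  Here m = 3 and rmax = 3, so acols and icols each hold rmax*m = 9 entries,
  stored one column slot at a time (stride-1 in the row index i), padded
  with zeros:

      acols = { 1, 3, 4,   2, 0, 5,   0, 0, 6 }
      icols = { 0, 1, 0,   2, 1, 1,   2, 1, 2 }

  Row 1 has a single nonzero, so its padded slots store the value 0.0 and
  repeat the last valid column index (1), keeping the gather x[icols[...]]
  in MatMult_AIJCRL within bounds.
*/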

PetscErrorCode MatAssemblyEnd_SeqAIJCRL(Mat A, MatAssemblyType mode)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;

  PetscFunctionBegin;
  a->inode.use = PETSC_FALSE;

  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
  if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0);

  /* Now calculate the permutation and grouping information. */
  ierr = MatSeqAIJCRL_create_aijcrl(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#include <../src/mat/impls/aij/seq/crl/ftn-kernels/fmultcrl.h>

/*
  Shared by both the sequential and parallel versions of the CRL matrix
  (MATSEQAIJCRL and MATMPIAIJCRL); the scatter is used only in the
  parallel version.
*/
PetscErrorCode MatMult_AIJCRL(Mat A,Vec xx,Vec yy)
{
  Mat_AIJCRL        *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt          m       = aijcrl->m; /* Number of rows in the matrix. */
  PetscInt          rmax    = aijcrl->rmax,*icols = aijcrl->icols;
  PetscScalar       *acols  = aijcrl->acols;
  PetscErrorCode    ierr;
  PetscScalar       *y;
  const PetscScalar *x;
#if !defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
  PetscInt          i,j,ii;
#endif

#if defined(PETSC_HAVE_PRAGMA_DISJOINT)
#pragma disjoint(*x,*y,*acols)
#endif

  PetscFunctionBegin;
  if (aijcrl->xscat) {
    ierr = VecCopy(xx,aijcrl->xwork);CHKERRQ(ierr);
    /* get remote values needed for local part of multiply */
    ierr = VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    xx   = aijcrl->xwork;
  }

  ierr = VecGetArrayRead(xx,&x);CHKERRQ(ierr);
  ierr = VecGetArray(yy,&y);CHKERRQ(ierr);

#if defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
  fortranmultcrl_(&m,&rmax,x,y,icols,acols);
#else

  /* first column */
  for (j=0; j<m; j++) y[j] = acols[j]*x[icols[j]];

  /* other columns */
#if defined(PETSC_HAVE_CRAY_VECTOR)
#pragma _CRI preferstream
#endif
  for (i=1; i<rmax; i++) {
    ii = i*m;
#if defined(PETSC_HAVE_CRAY_VECTOR)
#pragma _CRI prefervector
#endif
    for (j=0; j<m; j++) y[j] = y[j] + acols[ii+j]*x[icols[ii+j]];
  }
#if defined(PETSC_HAVE_CRAY_VECTOR)
#pragma _CRI ivdep
#endif

#endif
  ierr = PetscLogFlops(2.0*aijcrl->nz - m);CHKERRQ(ierr);
  ierr = VecRestoreArrayRead(xx,&x);CHKERRQ(ierr);
  ierr = VecRestoreArray(yy,&y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
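
/*
  For contrast, a minimal sketch of a plain row-oriented CSR multiply (in
  the spirit of MatMult_SeqAIJ, using the usual ai row pointers and aj
  column indices):

      for (i=0; i<m; i++) {
        y[i] = 0.0;
        for (k=ai[i]; k<ai[i+1]; k++) y[i] += aa[k]*x[aj[k]];
      }

  Its inner trip count varies per row, which hinders vectorization. The CRL
  loop above instead sweeps one column slot at a time, so every inner loop
  runs exactly m iterations with stride-1 access to y and acols; only the
  gather x[icols[ii+j]] remains irregular.
*/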

/* MatConvert_SeqAIJ_SeqAIJCRL converts a SeqAIJ matrix into a
 * SeqAIJCRL matrix. This routine is called by the MatCreate_SeqAIJCRL()
 * routine, but can also be used to convert an assembled SeqAIJ matrix
 * into a SeqAIJCRL one. */
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCRL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;
  Mat_AIJCRL     *aijcrl;
  PetscBool      sametype;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }
  ierr = PetscObjectTypeCompare((PetscObject)A,type,&sametype);CHKERRQ(ierr);
  if (sametype) PetscFunctionReturn(0);

  ierr     = PetscNewLog(B,&aijcrl);CHKERRQ(ierr);
  B->spptr = (void*) aijcrl;

  /* Set function pointers for methods that we inherit from AIJ but override. */
  B->ops->duplicate   = MatDuplicate_AIJCRL;
  B->ops->assemblyend = MatAssemblyEnd_SeqAIJCRL;
  B->ops->destroy     = MatDestroy_SeqAIJCRL;
  B->ops->mult        = MatMult_AIJCRL;

  /* If A has already been assembled, compute the permutation. */
  if (A->assembled) {
    ierr = MatSeqAIJCRL_create_aijcrl(B);CHKERRQ(ierr);
  }
  ierr    = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCRL);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}
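
/*
  A minimal usage sketch (error checking omitted): an assembled MATSEQAIJ
  matrix A can be converted from application code, after which MatMult()
  dispatches to MatMult_AIJCRL():

      Mat B;
      MatConvert(A,MATSEQAIJCRL,MAT_INITIAL_MATRIX,&B);

  Passing MAT_INPLACE_MATRIX with &A instead converts A itself, as
  MatCreate_SeqAIJCRL() below does.
*/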

/*@C
   MatCreateSeqAIJCRL - Creates a sparse matrix of type SEQAIJCRL.
   This type inherits from AIJ but stores some additional
   information that is used to allow better vectorization of
   the matrix-vector product. At the cost of increased storage, the AIJ formatted
   matrix can be copied to a format in which pieces of the matrix are
   stored in ELLPACK format, allowing the vectorized matrix multiply
   routine to use stride-1 memory accesses. As with the AIJ type, it is
   important to preallocate matrix storage in order to get good assembly
   performance.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   Notes:
   If nnz is given then nz is ignored.

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateMPIAIJCRL(), MatSetValues()
@*/
PetscErrorCode MatCreateSeqAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCRL);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
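
/*
  A minimal usage sketch: assemble an n x n tridiagonal SEQAIJCRL matrix
  and apply it (n, x, y assumed set up elsewhere; error checking omitted):

      Mat         A;
      PetscInt    i,col[3];
      PetscScalar v[3] = {-1.0,2.0,-1.0};

      MatCreateSeqAIJCRL(PETSC_COMM_SELF,n,n,3,NULL,&A);
      for (i=0; i<n; i++) {
        col[0] = i-1; col[1] = i; col[2] = i+1;
        if (i == 0)        MatSetValues(A,1,&i,2,col+1,v+1,INSERT_VALUES);
        else if (i == n-1) MatSetValues(A,1,&i,2,col,v,INSERT_VALUES);
        else               MatSetValues(A,1,&i,3,col,v,INSERT_VALUES);
      }
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);  (builds the acols/icols copy)
      MatMult(A,x,y);                        (dispatches to MatMult_AIJCRL)
*/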

PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCRL(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqAIJCRL(A,MATSEQAIJCRL,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}