/* mpiaij.h - actual source, petsc-3.13.6 (2020-09-29) */
#if !defined(__MPIAIJ_H)
#define __MPIAIJ_H

#include <../src/mat/impls/aij/seq/aij.h>
typedef struct { /* used by MatCreateMPIAIJSumSeqAIJ() for reusing the merged matrix */
  PetscLayout rowmap;
  PetscInt    **buf_ri,**buf_rj;
  PetscMPIInt *len_s,*len_r,*id_r;  /* arrays of length comm->size; lengths of the send/recv matrix values */
  PetscMPIInt nsend,nrecv;
  PetscInt    *bi,*bj;              /* i and j arrays of the local portion of mpi C (matrix product) - rename to ci, cj! */
  PetscInt    *owners_co,*coi,*coj; /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscErrorCode (*destroy)(Mat);
  PetscErrorCode (*duplicate)(Mat,MatDuplicateOption,Mat*);
} Mat_Merge_SeqsToMPI;
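
/*
   A hedged usage sketch (illustrative only, not part of this file): the
   struct above caches the communication pattern of the symbolic merge so a
   MAT_REUSE_MATRIX call can skip it and redo only the numeric phase.

     Mat C;
     ierr = MatCreateMPIAIJSumSeqAIJ(comm,A_seq,m,n,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
     ... update the numerical values of A_seq, keeping its nonzero pattern ...
     ierr = MatCreateMPIAIJSumSeqAIJ(comm,A_seq,m,n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
*/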
typedef struct { /* used by MatPtAPXXX_MPIAIJ_MPIAIJ() and MatMatMultXXX_MPIAIJ_MPIAIJ() */
  PetscInt    *startsj_s,*startsj_r; /* used by MatGetBrowsOfAoCols_MPIAIJ */
  PetscScalar *bufa;                 /* used by MatGetBrowsOfAoCols_MPIAIJ */
  Mat         P_loc,P_oth;           /* partial B_seq -- intended to replace B_seq */
  PetscInt    *api,*apj;             /* symbolic i and j arrays of the local product A_loc*B_seq */
  PetscScalar *apv;
  MatReuse    reuse;                 /* flag to skip MatGetBrowsOfAoCols_MPIAIJ() and MatMPIAIJGetLocalMat() in the first call of MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  PetscScalar *apa;                  /* temporary array to store a row of A*P, used in MatMatMult() */
  Mat         A_loc;                 /* used by MatTransposeMatMult(), contains api and apj */
  ISLocalToGlobalMapping ltog;       /* mapping from local column indices to global column indices for A_loc */
  Mat         Pt;                    /* used by MatTransposeMatMult(), Pt = P^T */
  PetscBool   freestruct;            /* flag for MatFreeIntermediateDataStructures() */
  Mat         Rd,Ro,AP_loc,C_loc,C_oth;
  PetscInt    algType;               /* implementation algorithm */
  PetscSF     sf;                    /* used to communicate the remote part of C */
  PetscInt    *c_othi,*c_rmti;

  Mat_Merge_SeqsToMPI *merge;
  PetscErrorCode (*destroy)(Mat);
  PetscErrorCode (*duplicate)(Mat,MatDuplicateOption,Mat*);
  PetscErrorCode (*view)(Mat,PetscViewer);
} Mat_APMPI;
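
/*
   A hedged sketch of the reuse pattern this cache enables, using standard
   user-level PETSc calls (illustrative, not code from this file):

     Mat C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   (symbolic + numeric)
     ... change the values, but not the nonzero pattern, of A ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     (numeric only, reuses Mat_APMPI)
*/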
typedef struct {
  Mat         A,B;               /* local submatrices: A (diagonal part), B (off-diagonal part) */
  PetscMPIInt size;              /* size of communicator */
  PetscMPIInt rank;              /* rank of proc in communicator */

  /* The following variables are used for matrix assembly */
  PetscBool   donotstash;        /* PETSC_TRUE if off-processor entries are dropped */
  MPI_Request *send_waits;       /* array of send requests */
  MPI_Request *recv_waits;       /* array of receive requests */
  PetscInt    nsends,nrecvs;     /* numbers of sends and receives */
  PetscScalar *svalues,*rvalues; /* sending and receiving data */
  PetscInt    rmax;              /* maximum message length */
#if defined(PETSC_USE_CTABLE)
  PetscTable  colmap;
#else
  PetscInt    *colmap;           /* local column number of an off-diagonal column */
#endif
  PetscInt    *garray;           /* global indices of all off-processor columns */

  /* The following variables are used for matrix-vector products */
  Vec         lvec;              /* local vector */
  Vec         diag;
  VecScatter  Mvctx,Mvctx_mpi1;  /* scatter contexts for vectors */
  PetscBool   Mvctx_mpi1_flg;    /* if true, an additional Mvctx_mpi1 is requested for mat-mat ops; default false */
  PetscBool   roworiented;       /* if true, row-oriented input; default true */

  /* The following variables are for MatGetRow() */
  PetscInt    *rowindices;       /* column indices for the row */
  PetscScalar *rowvalues;        /* nonzero values in the row */
  PetscBool   getrowactive;      /* indicates MatGetRow() is active and has not been restored */

  /* Used by MatDistribute_MPIAIJ() to allow reuse of previous matrix allocation and nonzero pattern */
  PetscInt    *ld;               /* number of entries per row left of the diagonal block */

  Mat_APMPI         *ap;             /* used by MatMatMult() and MatPtAP() */
  Mat_RARt          *rart;           /* used by MatRARt() */
  Mat_MatMatMatMult *matmatmatmult;  /* used by MatMatMatMult() */

  /* Used by the MPICUSP and MPICUSPARSE classes */
  void *spptr;
} Mat_MPIAIJ;
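
/*
   The A/B split above drives the parallel mat-vec: y = A*x_local + B*lvec,
   where lvec holds the off-process entries of x gathered through Mvctx.
   A hedged sketch of the pattern, overlapping communication with the local
   product (names as in the struct; illustrative, not the actual
   MatMult_MPIAIJ source):

     Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
     ierr = VecScatterBegin(aij->Mvctx,x,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = MatMult(aij->A,x,y);CHKERRQ(ierr);                 (diagonal block)
     ierr = VecScatterEnd(aij->Mvctx,x,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = MatMultAdd(aij->B,aij->lvec,y,y);CHKERRQ(ierr);    (off-diagonal block)
*/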
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat,MatAssemblyType);

PETSC_INTERN PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDisAssemble_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDuplicate_MPIAIJ(Mat,MatDuplicateOption,Mat*);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatFDColoringCreate_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatFDColoringSetUp_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatCreateSubMatrices_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatricesMPI_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_All(Mat,MatCreateSubMatrixOption,MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatView_MPIAIJ(Mat,PetscViewer);

PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat,IS,IS,PetscInt,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat,IS,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);

PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ_Binary(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat);

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_RARt_MPIAIJ_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductNumeric_RARt_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat);
PETSC_INTERN PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_BC(Mat);

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
#endif

PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat,Mat,MatReuse,PetscInt**,PetscInt**,MatScalar**,Mat*);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat);
PETSC_INTERN PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void*);
PETSC_INTERN PetscErrorCode MatSetOption_MPIAIJ(Mat,MatOption,PetscBool);

PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat,Mat*);

PETSC_INTERN PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems*,Mat);
PETSC_INTERN PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);

#if !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_REAL_SINGLE) && !defined(PETSC_USE_REAL___FLOAT128) && !defined(PETSC_USE_REAL___FP16)
PETSC_INTERN PetscErrorCode MatLUFactorSymbolic_MPIAIJ_TFS(Mat,IS,IS,const MatFactorInfo*,Mat*);
#endif
PETSC_INTERN PetscErrorCode MatSolve_MPIAIJ(Mat,Vec,Vec);
PETSC_INTERN PetscErrorCode MatILUFactor_MPIAIJ(Mat,IS,IS,const MatFactorInfo*);

PETSC_INTERN PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,PetscInt*);

extern PetscErrorCode MatGetDiagonalBlock_MPIAIJ(Mat,Mat*);
extern PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat,Vec);

PETSC_INTERN PetscErrorCode MatGetSeqMats_MPIAIJ(Mat,Mat*,Mat*);
PETSC_INTERN PetscErrorCode MatSetSeqMats_MPIAIJ(Mat,IS,IS,IS,MatStructure,Mat,Mat);
/* compute apa = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth using sparse axpy */
#define AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa) \
{ \
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj,_nextp,*_apJ; \
  PetscScalar *_aa,_valtmp,*_pa; \
  _apJ = apj + api[i]; \
  /* diagonal portion of A */ \
  _ai  = ad->i; \
  _anz = _ai[i+1] - _ai[i]; \
  _aj  = ad->j + _ai[i]; \
  _aa  = ad->a + _ai[i]; \
  for (_j=0; _j<_anz; _j++) { \
    _row = _aj[_j]; \
    _pi  = p_loc->i; \
    _pnz = _pi[_row+1] - _pi[_row]; \
    _pj  = p_loc->j + _pi[_row]; \
    _pa  = p_loc->a + _pi[_row]; \
    /* perform sparse axpy */ \
    _valtmp = _aa[_j]; \
    _nextp  = 0; \
    for (_k=0; _nextp<_pnz; _k++) { \
      if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */ \
        apa[_k] += _valtmp*_pa[_nextp++]; \
      } \
    } \
    (void)PetscLogFlops(2.0*_pnz); \
  } \
  /* off-diagonal portion of A */ \
  if (p_oth) { \
    _ai  = ao->i; \
    _anz = _ai[i+1] - _ai[i]; \
    _aj  = ao->j + _ai[i]; \
    _aa  = ao->a + _ai[i]; \
    for (_j=0; _j<_anz; _j++) { \
      _row = _aj[_j]; \
      _pi  = p_oth->i; \
      _pnz = _pi[_row+1] - _pi[_row]; \
      _pj  = p_oth->j + _pi[_row]; \
      _pa  = p_oth->a + _pi[_row]; \
      /* perform sparse axpy */ \
      _valtmp = _aa[_j]; \
      _nextp  = 0; \
      for (_k=0; _nextp<_pnz; _k++) { \
        if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */ \
          apa[_k] += _valtmp*_pa[_nextp++]; \
        } \
      } \
      (void)PetscLogFlops(2.0*_pnz); \
    } \
  } \
}
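
/*
   Hedged usage sketch for AProw_scalable() (variable names am, ad, ao,
   p_loc, p_oth, api, apj, apa are assumptions standing in for the caller's
   locals): apa is a packed work array holding only the nonzeros of row i of
   A*P, in the column order given by apj[api[i]..api[i+1]), so only that
   span needs zeroing before each row:

     PetscInt i;
     for (i=0; i<am; i++) {
       ierr = PetscArrayzero(apa,api[i+1]-api[i]);CHKERRQ(ierr);
       AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
       ... copy the api[i+1]-api[i] accumulated values into row i of C ...
     }
*/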
#define AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa) \
{ \
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj; \
  PetscScalar *_aa,_valtmp,*_pa; \
  /* diagonal portion of A */ \
  _ai  = ad->i; \
  _anz = _ai[i+1] - _ai[i]; \
  _aj  = ad->j + _ai[i]; \
  _aa  = ad->a + _ai[i]; \
  for (_j=0; _j<_anz; _j++) { \
    _row = _aj[_j]; \
    _pi  = p_loc->i; \
    _pnz = _pi[_row+1] - _pi[_row]; \
    _pj  = p_loc->j + _pi[_row]; \
    _pa  = p_loc->a + _pi[_row]; \
    /* perform dense axpy */ \
    _valtmp = _aa[_j]; \
    for (_k=0; _k<_pnz; _k++) { \
      apa[_pj[_k]] += _valtmp*_pa[_k]; \
    } \
    (void)PetscLogFlops(2.0*_pnz); \
  } \
  /* off-diagonal portion of A */ \
  if (p_oth) { \
    _ai  = ao->i; \
    _anz = _ai[i+1] - _ai[i]; \
    _aj  = ao->j + _ai[i]; \
    _aa  = ao->a + _ai[i]; \
    for (_j=0; _j<_anz; _j++) { \
      _row = _aj[_j]; \
      _pi  = p_oth->i; \
      _pnz = _pi[_row+1] - _pi[_row]; \
      _pj  = p_oth->j + _pi[_row]; \
      _pa  = p_oth->a + _pi[_row]; \
      /* perform dense axpy */ \
      _valtmp = _aa[_j]; \
      for (_k=0; _k<_pnz; _k++) { \
        apa[_pj[_k]] += _valtmp*_pa[_k]; \
      } \
      (void)PetscLogFlops(2.0*_pnz); \
    } \
  } \
}
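
/*
   Hedged usage sketch for AProw_nonscalable() (pN is an assumption naming
   the global number of columns of P): here apa is a dense work array indexed
   by global column, zeroed up front; the entries touched by row i must be
   reset after the row is harvested:

     PetscScalar *apa;
     ierr = PetscCalloc1(pN,&apa);CHKERRQ(ierr);
     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
     ... gather the nonzeros of apa into row i of C, then reset them to zero ...
*/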
#endif /* __MPIAIJ_H */