Actual source code: mpb_aij.c
petsc-3.11.4 2019-09-28
#include <../src/mat/impls/aij/mpi/mpiaij.h>
PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ     *aijB = (Mat_SeqAIJ*)aij->B->data;
  PetscMPIInt    commRank,subCommSize,subCommRank;
  PetscMPIInt    *commRankMap,subRank,rank,commsize;
  PetscInt       *garrayCMap,col,i,j,*nnz,newRow,newCol;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);CHKERRQ(ierr);
  ierr = MPI_Comm_size(subComm,&subCommSize);CHKERRQ(ierr);

  /* create subMat object with the relevant layout */
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = MatCreate(subComm,subMat);CHKERRQ(ierr);
    ierr = MatSetType(*subMat,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatSetSizes(*subMat,mat->rmap->n,mat->cmap->n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetBlockSizesFromMats(*subMat,mat,mat);CHKERRQ(ierr);

    /* need to set up rmap and cmap before preallocation */
    ierr = PetscLayoutSetUp((*subMat)->rmap);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*subMat)->cmap);CHKERRQ(ierr);
  }

  /* create a map of comm_rank from subComm to comm - should commRankMap and garrayCMap be kept for reuse? */
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&commRank);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(subComm,&subCommRank);CHKERRQ(ierr);
  ierr = PetscMalloc1(subCommSize,&commRankMap);CHKERRQ(ierr);
  ierr = MPI_Allgather(&commRank,1,MPI_INT,commRankMap,1,MPI_INT,subComm);CHKERRQ(ierr);
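  /* commRankMap[s] now holds the parent-communicator rank of subComm rank s,
     so the ownership ranges of mat can be looked up per sub-rank below */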

  /* Traverse garray and identify column indices [of offdiag mat] that
     should be discarded. For the ones not discarded, store the newCol+1
     value in garrayCMap */
  ierr = PetscCalloc1(aij->B->cmap->n,&garrayCMap);CHKERRQ(ierr);
  for (i=0; i<aij->B->cmap->n; i++) {
    col = aij->garray[i];
    for (subRank=0; subRank<subCommSize; subRank++) {
      rank = commRankMap[subRank];
      if ((col >= mat->cmap->range[rank]) && (col < mat->cmap->range[rank+1])) {
        garrayCMap[i] = (*subMat)->cmap->range[subRank] + col - mat->cmap->range[rank]+1;
        break;
      }
    }
  }
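  /* Columns owned by processes that are not in subComm match none of the
     ownership ranges above; their garrayCMap entries remain 0, so those
     columns are dropped from subMat */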

  if (scall == MAT_INITIAL_MATRIX) {
    /* Now compute preallocation for the offdiag mat */
    ierr = PetscCalloc1(aij->B->rmap->n,&nnz);CHKERRQ(ierr);
    for (i=0; i<aij->B->rmap->n; i++) {
      for (j=aijB->i[i]; j<aijB->i[i+1]; j++) {
        if (garrayCMap[aijB->j[j]]) nnz[i]++;
      }
    }
    ierr = MatMPIAIJSetPreallocation(*(subMat),0,NULL,0,nnz);CHKERRQ(ierr);
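    /* no diagonal-block preallocation is given (0,NULL): the diagonal
       block is replaced below by a reference to the existing aij->A */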

    /* reuse diag block with the new submat */
    ierr = MatDestroy(&((Mat_MPIAIJ*)((*subMat)->data))->A);CHKERRQ(ierr);

    ((Mat_MPIAIJ*)((*subMat)->data))->A = aij->A;

    ierr = PetscObjectReference((PetscObject)aij->A);CHKERRQ(ierr);
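    /* sharing aij->A is safe: each process keeps the same local row and
       column counts in subMat, so its sequential diagonal block is unchanged */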
  } else if (((Mat_MPIAIJ*)(*subMat)->data)->A != aij->A) {
    PetscObject obj = (PetscObject)((Mat_MPIAIJ*)((*subMat)->data))->A;

    ierr = PetscObjectReference((PetscObject)obj);CHKERRQ(ierr);

    ((Mat_MPIAIJ*)((*subMat)->data))->A = aij->A;

    ierr = PetscObjectReference((PetscObject)aij->A);CHKERRQ(ierr);
  }

  /* Now traverse aij->B and insert values into subMat */
  for (i=0; i<aij->B->rmap->n; i++) {
    newRow = (*subMat)->rmap->range[subCommRank] + i;
    for (j=aijB->i[i]; j<aijB->i[i+1]; j++) {
      newCol = garrayCMap[aijB->j[j]];
      if (newCol) {
        newCol--; /* remove the increment */
        ierr = MatSetValues(*subMat,1,&newRow,1,&newCol,(aijB->a+j),INSERT_VALUES);CHKERRQ(ierr);
      }
    }
  }
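  /* newRow and newCol above are global indices in subMat's numbering: the
     local row shifted by this process's ownership offset, and the remapped column */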

  /* assemble the submat */
  ierr = MatAssemblyBegin(*subMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*subMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* deallocate temporary data */
  ierr = PetscFree(commRankMap);CHKERRQ(ierr);
  ierr = PetscFree(garrayCMap);CHKERRQ(ierr);
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscFree(nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
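
Usage sketch (editor's addition, not part of mpb_aij.c): this routine is the MATMPIAIJ implementation behind the public MatGetMultiProcBlock() interface. The minimal example below shows one hypothetical way to exercise it through that interface; the even/odd MPI_Comm_split, the per-process row count, and the tridiagonal values are illustrative assumptions, not taken from PETSc.

#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat            A,subA;
  MPI_Comm       subComm;
  PetscMPIInt    rank;
  PetscInt       i,M,rstart,rend;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  /* distributed tridiagonal MPIAIJ matrix, 4 rows per process (illustrative) */
  ierr = MatCreateAIJ(PETSC_COMM_WORLD,4,4,PETSC_DETERMINE,PETSC_DETERMINE,3,NULL,1,NULL,&A);CHKERRQ(ierr);
  ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatSetValue(A,i,i,2.0,INSERT_VALUES);CHKERRQ(ierr);
    if (i > 0)   {ierr = MatSetValue(A,i,i-1,-1.0,INSERT_VALUES);CHKERRQ(ierr);}
    if (i < M-1) {ierr = MatSetValue(A,i,i+1,-1.0,INSERT_VALUES);CHKERRQ(ierr);}
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* split PETSC_COMM_WORLD into even- and odd-rank subcommunicators */
  ierr = MPI_Comm_split(PETSC_COMM_WORLD,rank%2,rank,&subComm);CHKERRQ(ierr);

  /* each process gathers the block of A owned by its subcommunicator;
     for a MATMPIAIJ matrix this dispatches to MatGetMultiProcBlock_MPIAIJ */
  ierr = MatGetMultiProcBlock(A,subComm,MAT_INITIAL_MATRIX,&subA);CHKERRQ(ierr);

  ierr = MatDestroy(&subA);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subComm);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}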