Actual source code: mpb_baij.c
petsc-3.14.6 2021-03-30
#include <../src/mat/impls/baij/mpi/mpibaij.h>
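/*
   MatGetMultiProcBlock_MPIBAIJ - builds, on the sub-communicator subComm, a parallel BAIJ matrix
   containing the rows owned by the processes of subComm. Each process keeps its local rows: the
   diagonal block (aij->A) is reused by reference, while the off-diagonal block is rebuilt because
   its column indices must be remapped to the layout of subComm and entries owned by processes
   outside subComm are dropped.
*/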
PetscErrorCode MatGetMultiProcBlock_MPIBAIJ(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
{
  PetscErrorCode ierr;
  Mat_MPIBAIJ    *aij  = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *aijB = (Mat_SeqBAIJ*)aij->B->data;
  PetscMPIInt    commRank,subCommSize,subCommRank;
  PetscMPIInt    *commRankMap,subRank,rank,commsize;
  PetscInt       *garrayCMap,col,i,j,*nnz,newRow,newCol,*newbRow,*newbCol,k,k1;
  PetscInt       bs = mat->rmap->bs;
  PetscScalar    *vals,*aijBvals;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);CHKERRQ(ierr);
  ierr = MPI_Comm_size(subComm,&subCommSize);CHKERRQ(ierr);
  /* create subMat object with the relevant layout */
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = MatCreate(subComm,subMat);CHKERRQ(ierr);
    ierr = MatSetType(*subMat,MATMPIBAIJ);CHKERRQ(ierr);
    ierr = MatSetSizes(*subMat,mat->rmap->n,mat->cmap->n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(*subMat,mat->rmap->bs,mat->cmap->bs);CHKERRQ(ierr);

    /* need to set up rmap and cmap before preallocation */
    ierr = PetscLayoutSetBlockSize((*subMat)->rmap,mat->rmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize((*subMat)->cmap,mat->cmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*subMat)->rmap);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*subMat)->cmap);CHKERRQ(ierr);
  }
  /* create a map of comm_rank from subComm to comm - should commRankMap and garrayCMap be kept for reuse? */
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&commRank);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(subComm,&subCommRank);CHKERRQ(ierr);
  ierr = PetscMalloc1(subCommSize,&commRankMap);CHKERRQ(ierr);
  ierr = MPI_Allgather(&commRank,1,MPI_INT,commRankMap,1,MPI_INT,subComm);CHKERRQ(ierr);
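  /* commRankMap[r] now holds the rank in mat's communicator of the process with rank r in subComm */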
  /* Traverse garray and identify the blocked column indices (of the off-diagonal matrix) that
     should be discarded. For the ones not discarded, store the newCol+1
     value in garrayCMap */
  ierr = PetscCalloc1(aij->B->cmap->n/bs,&garrayCMap);CHKERRQ(ierr);
  for (i=0; i<aij->B->cmap->n/bs; i++) {
    col = aij->garray[i]; /* blocked column index */
    for (subRank=0; subRank<subCommSize; subRank++) {
      rank = commRankMap[subRank];
      if ((col >= mat->cmap->range[rank]/bs) && (col < mat->cmap->range[rank+1]/bs)) {
        garrayCMap[i] = (((*subMat)->cmap->range[subRank] - mat->cmap->range[rank])/bs + col + 1);
        break;
      }
    }
  }
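  /* garrayCMap[i] == 0 marks a block column whose owner is not part of subComm; such entries are dropped below */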
  if (scall == MAT_INITIAL_MATRIX) {
    /* Now compute preallocation for the offdiag mat */
    ierr = PetscCalloc1(aij->B->rmap->n/bs,&nnz);CHKERRQ(ierr);
    for (i=0; i<aij->B->rmap->n/bs; i++) {
      for (j=aijB->i[i]; j<aijB->i[i+1]; j++) {
        if (garrayCMap[aijB->j[j]]) nnz[i]++;
      }
    }
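    /* nnz[i] counts the retained off-diagonal blocks in block row i; the diagonal part is
       preallocated with 0 entries because it is replaced by a reference to aij->A just below */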
    ierr = MatMPIBAIJSetPreallocation(*(subMat),bs,0,NULL,0,nnz);CHKERRQ(ierr);

    /* reuse diag block with the new submat */
    ierr = MatDestroy(&((Mat_MPIBAIJ*)((*subMat)->data))->A);CHKERRQ(ierr);

    ((Mat_MPIBAIJ*)((*subMat)->data))->A = aij->A;

    ierr = PetscObjectReference((PetscObject)aij->A);CHKERRQ(ierr);
  } else if (((Mat_MPIBAIJ*)(*subMat)->data)->A != aij->A) {
    PetscObject obj = (PetscObject)((Mat_MPIBAIJ*)((*subMat)->data))->A;

    ierr = PetscObjectReference((PetscObject)obj);CHKERRQ(ierr);

    ((Mat_MPIBAIJ*)((*subMat)->data))->A = aij->A;

    ierr = PetscObjectReference((PetscObject)aij->A);CHKERRQ(ierr);
  }
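  /* The off-diagonal block cannot be reused directly: its compressed column map (garray) refers to
     the layout of the original communicator, so retained entries are re-inserted below with remapped columns */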
  /* Now traverse aij->B and insert values into subMat */
  ierr = PetscMalloc3(bs,&newbRow,bs,&newbCol,bs*bs,&vals);CHKERRQ(ierr);
  for (i=0; i<aij->B->rmap->n/bs; i++) {
    newRow = (*subMat)->rmap->range[subCommRank] + i*bs;
    for (j=aijB->i[i]; j<aijB->i[i+1]; j++) {
      newCol = garrayCMap[aijB->j[j]];
      if (newCol) {
        newCol--; /* remove the increment */
        newCol *= bs;
        for (k=0; k<bs; k++) {
          newbRow[k] = newRow + k;
          newbCol[k] = newCol + k;
        }
        /* copy column-oriented aijB->a into row-oriented vals */
        aijBvals = aijB->a + j*bs*bs;
        for (k1=0; k1<bs; k1++) {
          for (k=0; k<bs; k++) {
            vals[k1+k*bs] = *aijBvals++;
          }
        }
        ierr = MatSetValues(*subMat,bs,newbRow,bs,newbCol,vals,INSERT_VALUES);CHKERRQ(ierr);
      }
    }
  }
  ierr = MatAssemblyBegin(*subMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*subMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* deallocate temporary data */
  ierr = PetscFree3(newbRow,newbCol,vals);CHKERRQ(ierr);
  ierr = PetscFree(commRankMap);CHKERRQ(ierr);
  ierr = PetscFree(garrayCMap);CHKERRQ(ierr);
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscFree(nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
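For context, a brief usage sketch (not part of mpb_baij.c): the routine above is normally reached through the public MatGetMultiProcBlock() interface rather than called directly. The matrix setup, the block size, and the MPI_Comm_split-based sub-communicator below are illustrative assumptions, not part of this file; the sketch simply assembles a block-diagonal MPIBAIJ matrix and extracts the rows owned by each half of the processes onto a sub-communicator.

#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat            A,subA;
  MPI_Comm       subComm;
  PetscMPIInt    rank,size;
  PetscInt       bs = 2,nb = 4,i,rstart,rend;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  /* assemble a diagonal MPIBAIJ matrix with nb block rows per process (illustrative data) */
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
  ierr = MatSetSizes(A,nb*bs,nb*bs,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,bs,1,NULL,1,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatSetValue(A,i,i,1.0,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* split PETSC_COMM_WORLD into two sub-communicators (illustrative choice of coloring) */
  ierr = MPI_Comm_split(PETSC_COMM_WORLD,rank < size/2 ? 0 : 1,rank,&subComm);CHKERRQ(ierr);

  /* gather the rows owned by this sub-communicator into a new parallel matrix;
     for a MATMPIBAIJ matrix this dispatches to MatGetMultiProcBlock_MPIBAIJ() */
  ierr = MatGetMultiProcBlock(A,subComm,MAT_INITIAL_MATRIX,&subA);CHKERRQ(ierr);

  ierr = MatDestroy(&subA);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subComm);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}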