Actual source code: overlapsplit.c
petsc-3.14.6 2021-03-30
/*
 * Increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */
#include <petscsf.h>
#include <petsc/private/matimpl.h>
/*
 * Increase the overlap of a sub-matrix across a sub-communicator;
 * the sub-matrix may be a graph or a numerical matrix
 */
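/*
 * Outline of the steps below:
 *   1) call the matrix-type-specific MatIncreaseOverlap on the input IS;
 *   2) if the IS communicator is neither the global communicator nor
 *      PETSC_COMM_SELF, gather all indices onto the root of the
 *      sub-communicator and remove duplicates there;
 *   3) scatter the de-duplicated indices back to their contributing
 *      ranks with a star forest (PetscSF);
 *   4) extract the corresponding sub-matrix and repartition it to
 *      rebalance the work across the sub-communicator.
 */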
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt        i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt        *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt  *indices;
  PetscMPIInt     srank,ssize,issamecomm,k,grank;
  IS              is_sc,allis_sc,partitioning;
  MPI_Comm        gcomm,dcomm,scomm;
  PetscSF         sf;
  PetscSFNode     *remote;
  Mat             *smat;
  MatPartitioning part;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  /* get a sub-communicator before calling the individual MatIncreaseOverlap
   * since the sub-communicator may be changed
   */
  ierr = PetscObjectGetComm((PetscObject)(*is),&dcomm);CHKERRQ(ierr);
  /* make a copy before the original one is deleted */
  ierr = PetscCommDuplicate(dcomm,&scomm,NULL);CHKERRQ(ierr);
  /* get the global communicator, where mat should be a global matrix */
  ierr = PetscObjectGetComm((PetscObject)mat,&gcomm);CHKERRQ(ierr);
  ierr = (*mat->ops->increaseoverlap)(mat,1,is,ov);CHKERRQ(ierr);
  ierr = MPI_Comm_compare(gcomm,scomm,&issamecomm);CHKERRQ(ierr);
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user also does not care about the sub-communicator
   */
  ierr = MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm);CHKERRQ(ierr);
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = MPI_Comm_rank(scomm,&srank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(scomm,&ssize);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(gcomm,&grank);CHKERRQ(ierr);
  /* create a new IS based on the sub-communicator
   * since the old IS is often based on PETSC_COMM_SELF
   */
  ierr = ISGetLocalSize(*is,&nindx);CHKERRQ(ierr);
  ierr = PetscMalloc1(nindx,&indices_sc);CHKERRQ(ierr);
  ierr = ISGetIndices(*is,&indices);CHKERRQ(ierr);
  ierr = PetscArraycpy(indices_sc,indices,nindx);CHKERRQ(ierr);
  ierr = ISRestoreIndices(*is,&indices);CHKERRQ(ierr);
  /* the original IS is no longer needed */
  ierr = ISDestroy(is);CHKERRQ(ierr);
  /* create an index set based on the sub-communicator */
  ierr = ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
  /* gather all indices within the sub-communicator */
  ierr = ISAllGather(is_sc,&allis_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
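  /* allis_sc now holds, on every rank, the concatenation of all ranks'
   * indices; only the root of scomm processes it below, the other ranks
   * destroy it right away */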
  /* gather local sizes */
  ierr = PetscMalloc1(ssize,&localsizes_sc);CHKERRQ(ierr);
  /* get the individual local sizes of all index sets */
  ierr = MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  /* only the root rank does these computations */
  if (!srank) {
    /* get the local size of the big index set */
    ierr = ISGetLocalSize(allis_sc,&localsize);CHKERRQ(ierr);
    ierr = PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc);CHKERRQ(ierr);
    ierr = PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd);CHKERRQ(ierr);
    ierr = ISGetIndices(allis_sc,&indices);CHKERRQ(ierr);
    ierr = PetscArraycpy(indices_ov,indices,localsize);CHKERRQ(ierr);
    ierr = ISRestoreIndices(allis_sc,&indices);CHKERRQ(ierr);
    ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
    /* assign the corresponding source rank to each gathered index */
    localsize_tmp = 0;
    for (k=0; k<ssize; k++) {
      for (i=0; i<localsizes_sc[k]; i++) {
        sources_sc[localsize_tmp++] = k;
      }
    }
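    /* the gathered array is ordered rank by rank, so sources_sc[j] now
     * records the rank that contributed indices_ov[j]; the sort below
     * permutes both arrays together so the pairing survives */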
    /* sort the indices, keeping the source ranks aligned */
    ierr = PetscSortIntWithArray(localsize,indices_ov,sources_sc);CHKERRQ(ierr);
    /* count the local sizes of the reduced indices */
    ierr = PetscArrayzero(localsizes_sc,ssize);CHKERRQ(ierr);
    /* initialize the first entry */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicate integers */
    for (i=1; i<localsize; i++) {
      if (indices_ov[i] != indices_ov[i-1]) {
        indices_ov_rd[localsize_tmp] = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
    ierr = PetscFree2(indices_ov,sources_sc);CHKERRQ(ierr);
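    /* at this point indices_ov_rd[] holds the strictly increasing unique
     * indices, sources_sc_rd[] the single contributing rank each unique
     * index will be sent back to, and localsizes_sc[k] the number of
     * unique indices rank k will receive */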
    ierr = PetscCalloc1(ssize+1,&localoffsets);CHKERRQ(ierr);
    for (k=0; k<ssize; k++) {
      localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
    }
    nleaves = localoffsets[ssize];
    ierr = PetscArrayzero(localoffsets,ssize+1);CHKERRQ(ierr);
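    /* localoffsets was zeroed above so it can be reused as a per-rank
     * running counter: leaf i lives on the root and targets the next free
     * slot in the receive buffer of rank sources_sc_rd[i], so the SF
     * reduce below returns every unique index to exactly one contributor */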
    nroots = localsizes_sc[srank];
    ierr = PetscMalloc1(nleaves,&remote);CHKERRQ(ierr);
    for (i=0; i<nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    ierr = PetscFree(localoffsets);CHKERRQ(ierr);
  } else {
    ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
    /* allocate a 'zero' pointer to avoid using an uninitialized variable */
    ierr = PetscCalloc1(0,&remote);CHKERRQ(ierr);
    nleaves       = 0;
    indices_ov_rd = NULL;
    sources_sc_rd = NULL;
  }
  /* scatter the reduced sizes to every rank */
  ierr = MPI_Scatter(localsizes_sc,1,MPIU_INT,&nroots,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  ierr = PetscFree(localsizes_sc);CHKERRQ(ierr);
  ierr = PetscCalloc1(nroots,&indices_recv);CHKERRQ(ierr);
  /* send the data back to everybody */
  ierr = PetscSFCreate(scomm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
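  /* a reduce moves leaf data to root data: the root's de-duplicated list
   * (the leaves) lands in each contributing rank's indices_recv buffer
   * (the roots); MPIU_REPLACE suffices since every root slot is targeted
   * by exactly one leaf */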
  ierr = PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree2(indices_ov_rd,sources_sc_rd);CHKERRQ(ierr);
  ierr = ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
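  /* extract the overlapped sub-matrix on the sub-communicator; the same IS
   * is used for rows and columns, so the extracted matrix is square */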
  ierr = MatCreateSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat);CHKERRQ(ierr);
  ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
  /* create a partitioner to repartition the sub-matrix */
  ierr = MatPartitioningCreate(scomm,&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,smat[0]);CHKERRQ(ierr);
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, we try to use ParMETIS
   * because its repartitioning routine may work better
   */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
  /* try to use the repartition function instead of the partition function */
  ierr = MatPartitioningParmetisSetRepartition(part);CHKERRQ(ierr);
#else
  /* we at least provide a default partitioner to rebalance the computation */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGAVERAGE);CHKERRQ(ierr);
#endif
  /* the user can pick any partitioner via an option */
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part,&partitioning);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&(smat[0]));CHKERRQ(ierr);
  ierr = PetscFree(smat);CHKERRQ(ierr);
  /* get the local rows, including the overlap: build the new IS from the
   * partitioning assignment */
  ierr = ISBuildTwoSided(partitioning,is_sc,is);CHKERRQ(ierr);
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&partitioning);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
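/* A minimal calling sketch (hypothetical; names are illustrative only):
 * the input IS lives on a sub-communicator 'scomm' that groups several
 * ranks of the matrix's communicator, and on return it is replaced by an
 * IS holding the repartitioned local rows with 'ov' layers of overlap:
 *
 *   IS rows;
 *   ISCreateGeneral(scomm,nlocal,row_indices,PETSC_COPY_VALUES,&rows);
 *   MatIncreaseOverlapSplit_Single(A,&rows,1);
 */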