Actual source code: overlapsplit.c
petsc-3.10.5 2019-03-28
/*
 * overlapsplit.c: increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */

#include <petscsf.h>
#include <petsc/private/matimpl.h>
/*
 * Increase the overlap of a sub-matrix across a sub-communicator;
 * the sub-matrix can be a graph or a numerical matrix.
 * */
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt        i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt        *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt  *indices;
  PetscMPIInt     srank,ssize,issamecomm,k,grank;
  IS              is_sc,allis_sc,partitioning;
  MPI_Comm        gcomm,dcomm,scomm;
  PetscSF         sf;
  PetscSFNode     *remote;
  Mat             *smat;
  MatPartitioning part;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  /* get a sub-communicator before calling the individual MatIncreaseOverlap,
   * since the sub-communicator may be changed
   * */
  ierr = PetscObjectGetComm((PetscObject)(*is),&dcomm);CHKERRQ(ierr);
  /* make a copy before the original one is deleted */
  ierr = PetscCommDuplicate(dcomm,&scomm,NULL);CHKERRQ(ierr);
  /* get the global communicator; mat is expected to be a global (parallel) matrix */
  ierr = PetscObjectGetComm((PetscObject)mat,&gcomm);CHKERRQ(ierr);
  /* increase the overlap of the single index set using the matrix's own MatIncreaseOverlap implementation */
  ierr = (*mat->ops->increaseoverlap)(mat,1,is,ov);CHKERRQ(ierr);
  ierr = MPI_Comm_compare(gcomm,scomm,&issamecomm);CHKERRQ(ierr);
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   * */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user also does not care about the sub-communicator
   * */
  ierr = MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm);CHKERRQ(ierr);
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
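  /* From here on, scomm is a genuine sub-communicator: it is neither congruent to
   * the global communicator nor to PETSC_COMM_SELF, so the overlapped index set
   * really has to be gathered, deduplicated, and repartitioned across its ranks.
   * */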
  ierr = MPI_Comm_rank(scomm,&srank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(scomm,&ssize);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(gcomm,&grank);CHKERRQ(ierr);
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on PETSC_COMM_SELF
   * */
  ierr = ISGetLocalSize(*is,&nindx);CHKERRQ(ierr);
  ierr = PetscCalloc1(nindx,&indices_sc);CHKERRQ(ierr);
  ierr = ISGetIndices(*is,&indices);CHKERRQ(ierr);
  ierr = PetscMemcpy(indices_sc,indices,sizeof(PetscInt)*nindx);CHKERRQ(ierr);
  ierr = ISRestoreIndices(*is,&indices);CHKERRQ(ierr);
  /* we do not need the original index set any more */
  ierr = ISDestroy(is);CHKERRQ(ierr);
  /* create an index set based on the sub-communicator */
  ierr = ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
  /* gather all indices within the sub-communicator */
  ierr = ISAllGather(is_sc,&allis_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
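  /* allis_sc is now a sequential IS held by every rank of scomm: the concatenation,
   * in rank order, of the local index sets of all ranks of scomm.  Only rank 0 of
   * scomm actually processes it below.
   * */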
  /* gather local sizes */
  ierr = PetscMalloc1(ssize,&localsizes_sc);CHKERRQ(ierr);
  /* get the individual local sizes of all index sets */
  ierr = MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  /* only the root does these computations */
  if (!srank) {
    /* get the local size of the big index set */
    ierr = ISGetLocalSize(allis_sc,&localsize);CHKERRQ(ierr);
    ierr = PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc);CHKERRQ(ierr);
    ierr = PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd);CHKERRQ(ierr);
    ierr = ISGetIndices(allis_sc,&indices);CHKERRQ(ierr);
    ierr = PetscMemcpy(indices_ov,indices,sizeof(PetscInt)*localsize);CHKERRQ(ierr);
    ierr = ISRestoreIndices(allis_sc,&indices);CHKERRQ(ierr);
    ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
    /* assign the corresponding source rank to each gathered index */
    localsize_tmp = 0;
    for (k=0; k<ssize; k++) {
      for (i=0; i<localsizes_sc[k]; i++) {
        sources_sc[localsize_tmp++] = k;
      }
    }
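    /* sources_sc[j] is the rank of scomm that contributed the j-th gathered index;
     * it is kept in step with indices_ov through the sort below
     * */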
    /* record where the indices come from */
    ierr = PetscSortIntWithArray(localsize,indices_ov,sources_sc);CHKERRQ(ierr);
    /* count the local sizes of the reduced (duplicate-free) indices */
    ierr = PetscMemzero(localsizes_sc,sizeof(PetscInt)*ssize);CHKERRQ(ierr);
    /* initialize the first entry */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicated indices */
    for (i=1; i<localsize; i++) {
      if (indices_ov[i] != indices_ov[i-1]) {
        indices_ov_rd[localsize_tmp]   = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
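    /* indices_ov_rd now holds the sorted, duplicate-free union of all gathered indices;
     * sources_sc_rd[j] is one of the ranks that requested indices_ov_rd[j], and
     * localsizes_sc[k] counts how many of the reduced indices were attributed to rank k
     * */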
    ierr = PetscFree2(indices_ov,sources_sc);CHKERRQ(ierr);
    ierr = PetscCalloc1(ssize+1,&localoffsets);CHKERRQ(ierr);
    for (k=0; k<ssize; k++) {
      localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
    }
    nleaves = localoffsets[ssize];
    ierr    = PetscMemzero(localoffsets,(ssize+1)*sizeof(PetscInt));CHKERRQ(ierr);
    nroots  = localsizes_sc[srank];
    ierr    = PetscCalloc1(nleaves,&remote);CHKERRQ(ierr);
    for (i=0; i<nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    ierr = PetscFree(localoffsets);CHKERRQ(ierr);
  } else {
    ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
    /* allocate a 'zero' pointer to avoid using an uninitialized variable */
    ierr = PetscCalloc1(0,&remote);CHKERRQ(ierr);
    nleaves       = 0;
    indices_ov_rd = 0;
    sources_sc_rd = 0;
  }
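  /* Star-forest layout: all leaves live on rank 0 of scomm (one per reduced index),
   * while every rank owns nroots root slots, one per reduced index attributed to it
   * (nroots is scattered from rank 0 below).  Leaf i points at root slot
   * remote[i].index on rank remote[i].rank, so the reduce below pushes each reduced
   * index from rank 0 to the rank that will own it.
   * */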
  /* scatter the sizes to everybody */
  ierr = MPI_Scatter(localsizes_sc,1,MPIU_INT,&nroots,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  ierr = PetscFree(localsizes_sc);CHKERRQ(ierr);
  ierr = PetscCalloc1(nroots,&indices_recv);CHKERRQ(ierr);
  /* send the data back to every process via a star forest */
  ierr = PetscSFCreate(scomm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree2(indices_ov_rd,sources_sc_rd);CHKERRQ(ierr);
  ierr = ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
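  /* is_sc now distributes the overlapped (reduced) indices over the ranks of scomm;
   * next, extract the corresponding sub-matrix and rebalance it
   * */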
  ierr = MatCreateSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat);CHKERRQ(ierr);
  ierr = ISDestroy(&allis_sc);CHKERRQ(ierr); /* already destroyed above on every rank, so this is a no-op */
  /* create a partitioner to repartition the sub-matrix */
  ierr = MatPartitioningCreate(scomm,&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,smat[0]);CHKERRQ(ierr);
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, we try to use ParMETIS
   * because its repartitioning routine usually works better
   * */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
  /* try to use the repartition function instead of the partition function */
  ierr = MatPartitioningParmetisSetRepartition(part);CHKERRQ(ierr);
#else
  /* otherwise at least provide a default partitioner to rebalance the computation */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGAVERAGE);CHKERRQ(ierr);
#endif
  /* the user can pick any partitioner via the options database */
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part,&partitioning);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&(smat[0]));CHKERRQ(ierr);
  ierr = PetscFree(smat);CHKERRQ(ierr);
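  /* ISBuildTwoSided below combines two pieces of information: 'partitioning' gives,
   * for each local row of the repartitioned sub-matrix, the rank it is assigned to,
   * and 'is_sc' gives that row's global row number in 'mat'.  The two-sided exchange
   * delivers to every rank exactly the global rows (including the overlap) that the
   * partitioner assigned to it, and those become the new *is.
   * */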
  /* get the local rows, including the overlap */
  ierr = ISBuildTwoSided(partitioning,is_sc,is);CHKERRQ(ierr);
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&partitioning);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
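
/*
 * Usage sketch (illustrative, not part of the PETSc source): this routine is
 * normally reached indirectly, e.g. through MatIncreaseOverlapSplit() when a
 * single 'big' subdomain spans several ranks (as PCGASM can set up).  Assuming
 * a parallel matrix A on the global communicator and an index set is0 whose
 * communicator is the sub-communicator of the ranks sharing the subdomain,
 * a direct call would look like:
 *
 *   ierr = MatIncreaseOverlapSplit_Single(A,&is0,1);CHKERRQ(ierr);
 *
 * On return, is0 has been destroyed and replaced by a new index set on the same
 * sub-communicator that holds the overlapped rows, rebalanced across its ranks.
 */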