Actual source code: overlapsplit.c

petsc-3.7.3 2016-08-01
  1: /*
  2:  * overlapsplit.c: increase the overlap of a 'big' subdomain across several processor cores
  3:  *
  4:  * Author: Fande Kong <fdkong.jd@gmail.com>
  5:  */

  7: #include <petscsf.h>
  8: #include <petsc/private/matimpl.h>



 14: /*
 15:  * Increase the overlap of a sub-matrix across a sub-communicator;
 16:  * the sub-matrix could be a graph or a numerical matrix.
 17:  */
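/*
 * Overview of the steps below: (1) the matrix-type-specific MatIncreaseOverlap
 * routine first grows the overlap on the global matrix; (2) the resulting indices
 * are gathered onto the root of the sub-communicator, sorted, and de-duplicated;
 * (3) a PetscSF pushes each reduced index back to the process it came from;
 * (4) a parallel sub-matrix is extracted on the sub-communicator and rebalanced
 * with a MatPartitioning object; (5) the final IS of local rows (with overlap)
 * is rebuilt with ISBuildTwoSided.
 */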
 18: PetscErrorCode  MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
 19: {
 20:   PetscInt         i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
 21:   PetscInt         *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
 22:   const PetscInt   *indices;
 23:   PetscMPIInt      srank,ssize,issamecomm,k,grank;
 24:   IS               is_sc,allis_sc,partitioning;
 25:   MPI_Comm         gcomm,dcomm,scomm;
 26:   PetscSF          sf;
 27:   PetscSFNode      *remote;
 28:   Mat              *smat;
 29:   MatPartitioning  part;
 30:   PetscErrorCode   ierr;

 33:   /* get a sub-communicator before calling the individual MatIncreaseOverlap,
 34:    * since the sub-communicator may be changed
 35:    */
 36:   PetscObjectGetComm((PetscObject)(*is),&dcomm);
 37:   /* make a copy before the original one is deleted */
 38:   PetscCommDuplicate(dcomm,&scomm,NULL);
 39:   /* get the global communicator; mat is expected to be a parallel matrix over this communicator */
 40:   PetscObjectGetComm((PetscObject)mat,&gcomm);
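       /* increase the overlap on the global matrix first, dispatching through the ops
          table to the matrix-type-specific MatIncreaseOverlap implementation */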
 41:   (*mat->ops->increaseoverlap)(mat,1,is,ov);
 42:   MPI_Comm_compare(gcomm,scomm,&issamecomm);
 43:   /* if the sub-communicator is the same as the global communicator,
 44:    * the user does not want to use a sub-communicator
 45:    */
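       /* MPI_IDENT means the two communicators are the same object, MPI_CONGRUENT that
          they contain the same processes in the same order; either way the
          sub-communicator adds nothing, so the overlap computed above is the final result */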
 46:   if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT){
 47:         PetscCommDestroy(&scomm);
 48:         return(0);
 49:   }
 50:   /* if the sub-communicator is PETSC_COMM_SELF,
 51:    * the user does not care about the sub-communicator either
 52:    */
 53:   MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm);
 54:   if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT){
 55:         PetscCommDestroy(&scomm);
 56:         return(0);
 57:   }
 58:   MPI_Comm_rank(scomm,&srank);
 59:   MPI_Comm_size(scomm,&ssize);
 60:   MPI_Comm_rank(gcomm,&grank);
 61:   /* create a new IS based on the sub-communicator,
 62:    * since the old IS is often based on PETSC_COMM_SELF
 63:    */
 64:   ISGetLocalSize(*is,&nindx);
 65:   PetscCalloc1(nindx,&indices_sc);
 66:   ISGetIndices(*is,&indices);
 67:   PetscMemcpy(indices_sc,indices,sizeof(PetscInt)*nindx);
 68:   ISRestoreIndices(*is,&indices);
 69:   /* we do not need the original IS any more */
 70:   ISDestroy(is);
 71:   /* create an index set based on the sub-communicator */
 72:   ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc);
 73:   /* gather all indices within the sub-communicator */
 74:   ISAllGather(is_sc,&allis_sc);
 75:   ISDestroy(&is_sc);
 76:   /* gather local sizes */
 77:   PetscMalloc1(ssize,&localsizes_sc);
 78:   /* get individual local sizes for all index sets */
 79:   MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm);
 80:   /* only the root does these computations */
 81:   if(!srank){
 82:    /* get local size for the big index set */
 83:    ISGetLocalSize(allis_sc,&localsize);
 84:    PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc);
 85:    PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd);
 86:    ISGetIndices(allis_sc,&indices);
 87:    PetscMemcpy(indices_ov,indices,sizeof(PetscInt)*localsize);
 88:    ISRestoreIndices(allis_sc,&indices);
 89:    ISDestroy(&allis_sc);
 90:    /* assign a source rank to each gathered index */
 91:    localsize_tmp = 0;
 92:    for(k=0; k<ssize; k++){
 93:      for(i=0; i<localsizes_sc[k]; i++){
 94:        sources_sc[localsize_tmp++] = k;
 95:      }
 96:    }
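        /* sources_sc[j] is now the sub-communicator rank that contributed the j-th
           gathered index (ISAllGather concatenates the index sets in rank order) */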
 97:    /* sort the indices while keeping track of where each one came from */
 98:    PetscSortIntWithArray(localsize,indices_ov,sources_sc);
 99:    /* count local sizes for reduced indices */
100:    PetscMemzero(localsizes_sc,sizeof(PetscInt)*ssize);
101:    /* initialize with the first entry */
102:    if(localsize){
103:          indices_ov_rd[0] = indices_ov[0];
104:          sources_sc_rd[0] = sources_sc[0];
105:          localsizes_sc[sources_sc[0]]++;
106:    }
107:    localsize_tmp = 1;
108:    /* remove duplicate integers */
109:    for(i=1; i<localsize; i++){
110:          if(indices_ov[i] != indices_ov[i-1]){
111:            indices_ov_rd[localsize_tmp]   = indices_ov[i];
112:            sources_sc_rd[localsize_tmp++] = sources_sc[i];
113:            localsizes_sc[sources_sc[i]]++;
114:          }
115:    }
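        /* indices_ov_rd[] now holds the sorted union of the gathered indices, each
           attributed to exactly one source rank, and localsizes_sc[k] counts how
           many of these unique indices go back to rank k */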
116:    PetscFree2(indices_ov,sources_sc);
117:    PetscCalloc1(ssize+1,&localoffsets);
118:    for(k=0; k<ssize; k++){
119:          localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
120:    }
121:    nleaves = localoffsets[ssize];
122:    PetscMemzero(localoffsets,(ssize+1)*sizeof(PetscInt));
123:    nroots  = localsizes_sc[srank];
124:    PetscCalloc1(nleaves,&remote);
125:    for(i=0; i<nleaves; i++){
126:          remote[i].rank  = sources_sc_rd[i];
127:          remote[i].index = localoffsets[sources_sc_rd[i]]++;
128:    }
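        /* leaf i lives on the root and maps to slot remote[i].index in the buffer of
           sub-communicator rank remote[i].rank, i.e. one leaf per reduced index */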
129:    PetscFree(localoffsets);
130:   }else{
131:    ISDestroy(&allis_sc);
132:    /* allocate a zero-length array to avoid using an uninitialized pointer */
133:    PetscCalloc1(0,&remote);
134:    nleaves       = 0;
135:    indices_ov_rd = NULL;
136:    sources_sc_rd = NULL;
137:   }
138:   /* scatter sizes to everybody */
139:   MPI_Scatter(localsizes_sc,1, MPIU_INT,&nroots,1, MPIU_INT,0,scomm);
140:   PetscFree(localsizes_sc);
141:   PetscCalloc1(nroots,&indices_recv);
142:   /* send the reduced indices back to everybody */
143:   PetscSFCreate(scomm,&sf);
144:   PetscSFSetType(sf,PETSCSFBASIC);
145:   PetscSFSetFromOptions(sf);
146:   PetscSFSetGraph(sf,nroots,nleaves,PETSC_NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);
147:   PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);
148:   PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);
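       /* after the reduce each process holds its nroots de-duplicated overlap indices
          in indices_recv */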
149:   PetscSFDestroy(&sf);
150:   PetscFree2(indices_ov_rd,sources_sc_rd);
151:   ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc);
152:   MatGetSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat);
153:   ISDestroy(&allis_sc);
154:   /* create a partitioner to repartition the sub-matrix */
155:   MatPartitioningCreate(scomm,&part);
156:   MatPartitioningSetAdjacency(part,smat[0]);
157: #if defined(PETSC_HAVE_PARMETIS)
158:   /* if a ParMETIS installation exists, we try to use ParMETIS,
159:    * because its repartitioning routine likely works better
160:    */
161:   MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);
162:   /* try to use the repartition function instead of the partition function */
163:   MatPartitioningParmetisSetRepartition(part);
164: #else
165:   /* otherwise we at least provide a default partitioner to rebalance the computation */
166:   MatPartitioningSetType(part,MATPARTITIONINGAVERAGE);
167: #endif
168:   /* the user can pick any partitioner via a run-time option */
169:   MatPartitioningSetFromOptions(part);
170:   MatPartitioningApply(part,&partitioning);
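       /* each entry of partitioning gives the sub-communicator rank that should own the
          corresponding local row of the sub-matrix after rebalancing */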
171:   MatPartitioningDestroy(&part);
172:   MatDestroy(&(smat[0]));
173:   PetscFree(smat);
174:   /* get the local rows, including the overlap */
175:   ISBuildTwoSided(partitioning,is_sc,is);
176:   ISDestroy(&is_sc);
177:   ISDestroy(&partitioning);
178:   PetscCommDestroy(&scomm);
179:   return(0);
180: }
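
The communication kernel above is a plain PetscSF push: the root of the sub-communicator holds one leaf per reduced index, each leaf points at a (rank, slot) pair, and PetscSFReduce with MPIU_REPLACE delivers the leaf values into the owners' root arrays. The stand-alone sketch below is not part of overlapsplit.c; it isolates that pattern on a toy graph in which rank 0 owns all the leaves and every process owns two root slots, with the layout and payload values invented purely for illustration.

#include <petscsf.h>

int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  PetscSF        sf;
  PetscSFNode    *remote;
  PetscInt       nroots,nleaves,i,*leafdata,*rootdata;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  /* every process owns two root slots; rank 0 owns all the leaves,
     one leaf per (rank,slot) pair, mimicking the layout built on the root above */
  nroots  = 2;
  nleaves = (rank == 0) ? 2*size : 0;
  ierr = PetscCalloc1(nleaves,&remote);CHKERRQ(ierr);
  ierr = PetscCalloc1(nleaves,&leafdata);CHKERRQ(ierr);
  ierr = PetscCalloc1(nroots,&rootdata);CHKERRQ(ierr);
  for (i=0; i<nleaves; i++) {
    remote[i].rank  = i/2;             /* destination process     */
    remote[i].index = i%2;             /* slot on the destination */
    leafdata[i]     = 100*(i/2) + i%2; /* payload to deliver      */
  }
  ierr = PetscSFCreate(PETSC_COMM_WORLD,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_COPY_VALUES,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* push the leaf values (held on rank 0) into the root slots of their owners */
  ierr = PetscSFReduceBegin(sf,MPIU_INT,leafdata,rootdata,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,leafdata,rootdata,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] received %D %D\n",rank,rootdata[0],rootdata[1]);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree(leafdata);CHKERRQ(ierr);
  ierr = PetscFree(rootdata);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}

Run with a few MPI ranks, e.g. mpiexec -n 3: each rank should report the two values 100*rank and 100*rank+1, showing how data prepared on one process lands in the owners' root buffers, exactly as the reduced overlap indices do above.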