Actual source code: ex183.c

petsc-3.8.4 2018-03-24
static char help[] =
  "Example of extracting an array of MPI submatrices from a given MPI matrix.\n"
  "This test can only be run in parallel.\n"
  "\n";

/*T
   Concepts: Mat^mat submatrix, parallel
   Processors: n
T*/


#include <petscmat.h>

int main(int argc, char **args)
{
  Mat             A,*submats;
  MPI_Comm        subcomm;
  PetscMPIInt     rank,size,subrank,subsize,color;
  PetscInt        m,n,N,bs,rstart,rend,i,j,k,total_subdomains,hash,nsubdomains=1;
  PetscInt        nis,*cols,gnsubdomains,gsubdomainnums[1],gsubdomainperm[1],s,gs;
  PetscInt        *rowindices,*colindices,idx,rep;
  PetscScalar     *vals;
  IS              rowis[1],colis[1];
  PetscViewer     viewer;
  PetscBool       permute_indices,flg;
  PetscErrorCode  ierr;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);

  PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"ex183","Mat");
  m = 5;
  PetscOptionsInt("-m","Local matrix size","MatSetSizes",m,&m,&flg);
  total_subdomains = size-1;
  PetscOptionsInt("-total_subdomains","Number of submatrices where 0 < n <= comm size","MatCreateSubMatricesMPI",total_subdomains,&total_subdomains,&flg);
  permute_indices = PETSC_FALSE;
  PetscOptionsBool("-permute_indices","Whether to permute indices before breaking them into subdomains","ISCreateGeneral",permute_indices,&permute_indices,&flg);
  hash = 7;
  PetscOptionsInt("-hash","Permutation factor, which has to be relatively prime to M = size*m (total matrix size)","ISCreateGeneral",hash,&hash,&flg);
  rep = 2;
  PetscOptionsInt("-rep","Number of times to carry out submatrix extractions; currently only 1 & 2 are supported",NULL,rep,&rep,&flg);
  PetscOptionsEnd();

  if (total_subdomains > size) SETERRQ2(PETSC_COMM_WORLD,PETSC_ERR_ARG_WRONG,"Number of subdomains %D must not exceed comm size %D",total_subdomains,size);
  if (total_subdomains < 1 || total_subdomains > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and <= %D (comm size), got total_subdomains = %D",size,total_subdomains);
  if (rep != 1 && rep != 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid number of test repetitions: %D; must be 1 or 2",rep);

  viewer = PETSC_VIEWER_STDOUT_WORLD;
  /* Create logically sparse, but effectively dense matrix for easy verification of submatrix extraction correctness. */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,m,m,PETSC_DECIDE,PETSC_DECIDE);
  MatSetFromOptions(A);
  MatSetUp(A);
  MatGetSize(A,NULL,&N);
  MatGetLocalSize(A,NULL,&n);
  MatGetBlockSize(A,&bs);
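  /* Preallocate for each matrix format this example may be run with (e.g. -mat_type aij, baij, sbaij);
     preallocation routines whose format does not match the actual matrix type are ignored. */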
  MatSeqAIJSetPreallocation(A,n,NULL);
  MatMPIAIJSetPreallocation(A,n,NULL,N-n,NULL);
  MatSeqBAIJSetPreallocation(A,bs,n/bs,NULL);
  MatMPIBAIJSetPreallocation(A,bs,n/bs,NULL,(N-n)/bs,NULL);
  MatSeqSBAIJSetPreallocation(A,bs,n/bs,NULL);
  MatMPISBAIJSetPreallocation(A,bs,n/bs,NULL,(N-n)/bs,NULL);

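  /* Fill each locally owned row i with the value 10000*i + j in column j, so every entry encodes
     its global position and extracted submatrices can be verified by inspection. */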
  PetscMalloc2(N,&cols,N,&vals);
  MatGetOwnershipRange(A,&rstart,&rend);
  for (j = 0; j < N; ++j) cols[j] = j;
  for (i=rstart; i<rend; i++) {
    for (j=0;j<N;++j) {
      vals[j] = i*10000+j;
    }
    MatSetValues(A,1,&i,N,cols,vals,INSERT_VALUES);
  }
  PetscFree2(cols,vals);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  PetscViewerASCIIPrintf(viewer,"Initial matrix:\n");
  MatView(A,viewer);


  /*
     Create subcomms and ISs so that each rank participates in one IS.
     The IS either coalesces adjacent rank indices (contiguous),
     or selects indices by scrambling them using a hash.
  */
  k = size/total_subdomains + (size%total_subdomains>0); /* There are up to k ranks to a color */
  color = rank/k;
  MPI_Comm_split(PETSC_COMM_WORLD,color,rank,&subcomm);
  MPI_Comm_size(subcomm,&subsize);
  MPI_Comm_rank(subcomm,&subrank);
  MatGetOwnershipRange(A,&rstart,&rend);
  nis = 1;
  PetscMalloc2(rend-rstart,&rowindices,rend-rstart,&colindices);
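  /* Each rank contributes the indices of its locally owned rows, either as-is or scrambled as (j*hash)%N;
     the column indices are the row indices shifted by m (mod N), so the extracted block is not simply diagonal. */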

  for (j = rstart; j < rend; ++j) {
    if (permute_indices) {
      idx = (j*hash);
    } else {
      idx = j;
    }
    rowindices[j-rstart] = idx%N;
    colindices[j-rstart] = (idx+m)%N;
  }
  ISCreateGeneral(subcomm,rend-rstart,rowindices,PETSC_COPY_VALUES,&rowis[0]);
  ISCreateGeneral(subcomm,rend-rstart,colindices,PETSC_COPY_VALUES,&colis[0]);
  ISSort(rowis[0]);
  ISSort(colis[0]);
  PetscFree2(rowindices,colindices);
  /*
    Now view the ISs.  To avoid deadlock when viewing a list of objects on different subcomms,
    we need to obtain the global numbers of our local objects and wait for the corresponding global
    number to be viewed.
  */
  PetscViewerASCIIPrintf(viewer,"Subdomains");
  if (permute_indices) {
    PetscViewerASCIIPrintf(viewer," (hash=%D)",hash);
  }
  PetscViewerASCIIPrintf(viewer,":\n");
  PetscViewerFlush(viewer);

  nsubdomains = 1;
  for (s = 0; s < nsubdomains; ++s) gsubdomainperm[s] = s;
  PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,1,(PetscObject*)rowis,&gnsubdomains,gsubdomainnums);
  PetscSortIntWithPermutation(nsubdomains,gsubdomainnums,gsubdomainperm);
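  /* Walk over all subdomains in their global order; a rank takes part in viewing only when the global
     number currently being visited is one of its own subdomains, and all ranks synchronize at each step. */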
  for (gs=0,s=0; gs < gnsubdomains;++gs) {
    if (s < nsubdomains) {
      PetscInt ss;
      ss = gsubdomainperm[s];
      if (gs == gsubdomainnums[ss]) { /* Global subdomain gs being viewed is my subdomain with local number ss. */
        PetscViewer subviewer = NULL;
        PetscViewerGetSubViewer(viewer,PetscObjectComm((PetscObject)rowis[ss]),&subviewer);
        PetscViewerASCIIPrintf(subviewer,"Row IS %D\n",gs);
        ISView(rowis[ss],subviewer);
        PetscViewerFlush(subviewer);
        PetscViewerASCIIPrintf(subviewer,"Col IS %D\n",gs);
        ISView(colis[ss],subviewer);
        PetscViewerRestoreSubViewer(viewer,PetscObjectComm((PetscObject)rowis[ss]),&subviewer);
        ++s;
      }
    }
    MPI_Barrier(PETSC_COMM_WORLD);
  }
  PetscViewerFlush(viewer);
  ISSort(rowis[0]);
  ISSort(colis[0]);
  nsubdomains = 1;
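  /* Extract one parallel submatrix per set of (row,column) ISs; each submatrix lives on the
     subcommunicator on which its ISs were created. */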
  MatCreateSubMatricesMPI(A,nsubdomains,rowis,colis,MAT_INITIAL_MATRIX,&submats);
  /*
    Now view the matrices.  To avoid deadlock when viewing a list of objects on different subcomms,
    we need to obtain the global numbers of our local objects and wait for the corresponding global
    number to be viewed.
  */
  PetscViewerASCIIPrintf(viewer,"Submatrices (repetition 1):\n");
  for (s = 0; s < nsubdomains; ++s) gsubdomainperm[s] = s;
  PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,1,(PetscObject*)submats,&gnsubdomains,gsubdomainnums);
  PetscSortIntWithPermutation(nsubdomains,gsubdomainnums,gsubdomainperm);
  for (gs=0,s=0; gs < gnsubdomains;++gs) {
    if (s < nsubdomains) {
      PetscInt ss;
      ss = gsubdomainperm[s];
      if (gs == gsubdomainnums[ss]) { /* Global subdomain gs being viewed is my subdomain with local number ss. */
        PetscViewer subviewer = NULL;
        PetscViewerGetSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        MatView(submats[ss],subviewer);
        PetscViewerRestoreSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        ++s;
      }
    }
    MPI_Barrier(PETSC_COMM_WORLD);
  }
  PetscViewerFlush(viewer);
  if (rep == 1) goto cleanup;
  nsubdomains = 1;
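  /* Repeat the extraction, this time reusing the previously created submatrices (MAT_REUSE_MATRIX). */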
  MatCreateSubMatricesMPI(A,nsubdomains,rowis,colis,MAT_REUSE_MATRIX,&submats);
  /*
    Now view the matrices.  To avoid deadlock when viewing a list of objects on different subcomms,
    we need to obtain the global numbers of our local objects and wait for the corresponding global
    number to be viewed.
  */
  PetscViewerASCIIPrintf(viewer,"Submatrices (repetition 2):\n");
  for (s = 0; s < nsubdomains; ++s) gsubdomainperm[s] = s;
  PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,1,(PetscObject*)submats,&gnsubdomains,gsubdomainnums);
  PetscSortIntWithPermutation(nsubdomains,gsubdomainnums,gsubdomainperm);
  for (gs=0,s=0; gs < gnsubdomains;++gs) {
    if (s < nsubdomains) {
      PetscInt ss;
      ss = gsubdomainperm[s];
      if (gs == gsubdomainnums[ss]) { /* Global subdomain gs being viewed is my subdomain with local number ss. */
        PetscViewer subviewer = NULL;
        PetscViewerGetSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        MatView(submats[ss],subviewer);
        PetscViewerRestoreSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        ++s;
      }
    }
    MPI_Barrier(PETSC_COMM_WORLD);
  }
  PetscViewerFlush(viewer);
  cleanup:
  for (k=0;k<nsubdomains;++k) {
    MatDestroy(submats+k);
  }
  PetscFree(submats);
  for (k=0;k<nis;++k) {
    ISDestroy(rowis+k);
    ISDestroy(colis+k);
  }
  MatDestroy(&A);
  MPI_Comm_free(&subcomm);
  PetscFinalize();
  return ierr;
}
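
A typical parallel run of this example (assuming the executable has been built as ex183) might look like

  mpiexec -n 4 ./ex183 -total_subdomains 2 -permute_indices -hash 7

using the options registered above (-m, -total_subdomains, -permute_indices, -hash, -rep); note that -hash must be relatively prime to the total matrix size size*m.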