Actual source code: sbaijov.c

petsc-3.7.3 2016-08-01
  2: /*
  3:    Routines to compute overlapping regions of a parallel MPI matrix.
  4:    Used for finding submatrices that are shared across processors.
  5: */
  6: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
  7: #include <petscbt.h>

  9: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
 10: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

 14: PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
 15: {
 17:   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
 18:   IS             *is_new,*is_row;
 19:   Mat            *submats;
 20:   Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
 21:   Mat_SeqSBAIJ   *asub_i;
 22:   PetscBT        table;
 23:   PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
 24:   const PetscInt *idx;
 25:   PetscBool      flg,*allcolumns,*allrows;

 28:   PetscMalloc1(is_max,&is_new);
 29:   /* Convert the indices into block format */
 30:   ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);
 31:   if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");

 33:   /* ----- previous non-scalable implementation ----- */
 34:   flg  = PETSC_FALSE;
 35:   PetscOptionsHasName(NULL,NULL, "-IncreaseOverlap_old", &flg);
 36:   if (flg) { /* previous non-scalable implementation */
 37:     printf("use previous non-scalable implementation...\n");
 38:     for (i=0; i<ov; ++i) {
 39:       MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
 40:     }
 41:   } else { /* implementation using modified BAIJ routines */

 43:     PetscMalloc1(Mbs+1,&nidx);
 44:     PetscBTCreate(Mbs,&table); /* for column search */
 45:     PetscMalloc2(is_max+1,&allcolumns,is_max+1,&allrows);

 47:     /* Create is_row */
 48:     PetscMalloc1(is_max,&is_row);
 49:     ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);

 51:     allrows[0] = PETSC_TRUE;
 52:     for (i=1; i<is_max; i++) {
 53:       is_row[i]  = is_row[0]; /* reuse is_row[0] */
 54:       allrows[i] = PETSC_TRUE;
 55:     }

 57:     /* Allocate memory to hold all the submatrices - Modified from MatGetSubMatrices_MPIBAIJ() */
 58:     PetscMalloc1(is_max+1,&submats);

 60:     /* Check for special case: each processor gets entire matrix columns */
 61:     for (i=0; i<is_max; i++) {
 62:       ISIdentity(is_new[i],&flg);
 63:       ISGetLocalSize(is_new[i],&isz);
 64:       if (flg && isz == Mbs) {
 65:         allcolumns[i] = PETSC_TRUE;
 66:       } else {
 67:         allcolumns[i] = PETSC_FALSE;
 68:       }
 69:     }

 71:     /* Determine the number of stages through which submatrices are done */
 72:     nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
 73:     if (!nmax) nmax = 1;
 74:     nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0);

 76:     /* Make sure every processor loops through the nstages */
 77:     MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));
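      /* Worked example (illustrative, assuming 4-byte PetscInt): with c->Nbs = 1,000,000 block
         columns, nmax = 20*1000000/(1000000*4) = 5, so at most 5 index sets are processed per
         stage, and is_max = 12 gives nstages_local = 12/5 + 1 = 3; the Allreduce above then
         makes every process loop over the global maximum number of stages. */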

 79:     for (iov=0; iov<ov; ++iov) {
 80:       /* 1) Get submats for column search */
 81:       for (i=0,pos=0; i<nstages; i++) {
 82:         if (pos+nmax <= is_max) max_no = nmax;
 83:         else if (pos == is_max) max_no = 0;
 84:         else                    max_no = is_max-pos;
 85:         c->ijonly = PETSC_TRUE;
 86:         MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,allrows+pos,allcolumns+pos,submats+pos);
 87:         pos      += max_no;
 88:       }

 90:       /* 2) Row search */
 91:       MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);

 93:       /* 3) Column search */
 94:       for (i=0; i<is_max; i++) {
 95:         asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
 96:         ai     = asub_i->i;

 98:         /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ_Once() into the table */
 99:         PetscBTMemzero(Mbs,table);

101:         ISGetIndices(is_new[i],&idx);
102:         ISGetLocalSize(is_new[i],&nis);
103:         for (l=0; l<nis; l++) {
104:           PetscBTSet(table,idx[l]);
105:           nidx[l] = idx[l];
106:         }
107:         isz = nis;

109:         /* add column entries to table */
110:         for (brow=0; brow<Mbs; brow++) {
111:           nz = ai[brow+1] - ai[brow];
112:           if (nz) {
113:             if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
114:           }
115:         }
116:         ISRestoreIndices(is_new[i],&idx);
117:         ISDestroy(&is_new[i]);

119:         /* create updated is_new */
120:         ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);
121:       }

123:       /* Free tmp spaces */
124:       for (i=0; i<is_max; i++) {
125:         MatDestroy(&submats[i]);
126:       }
127:     }
128:     PetscFree2(allcolumns,allrows);
129:     PetscBTDestroy(&table);
130:     PetscFree(submats);
131:     ISDestroy(&is_row[0]);
132:     PetscFree(is_row);
133:     PetscFree(nidx);

135:   }

137:   for (i=0; i<is_max; i++) {ISDestroy(&is[i]);}
138:   ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);

140:   for (i=0; i<is_max; i++) {ISDestroy(&is_new[i]);}
141:   PetscFree(is_new);
142:   return(0);
143: }

145: typedef enum {MINE,OTHER} WhoseOwner;
146: /*  data1, odata1 and odata2 are packed in the format (for communication):
147:        data[0]          = is_max, no of is
148:        data[1]          = size of is[0]
149:         ...
150:        data[is_max]     = size of is[is_max-1]
151:        data[is_max + 1] = data(is[0])
152:         ...
153:        data[is_max+1+sum(size of is[k], k=0,...,i-1)] = data(is[i])
154:         ...
155:    data2 is packed in the format (for creating output is[]):
156:        data[0]          = is_max, no of is
157:        data[1]          = size of is[0]
158:         ...
159:        data[is_max]     = size of is[is_max-1]
160:        data[is_max + 1] = data(is[0])
161:         ...
162:        data[is_max + 1 + Mbs*i] = data(is[i])
163:         ...
164: */
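/* A minimal stand-alone sketch (not part of PETSc) of how a buffer in the data1/odata1/odata2
   format above could be filled from plain index arrays; the helper name PackISBuffer and its
   arguments are hypothetical and are shown only to make the header-plus-payload layout concrete.
   For example, is_max = 2 with is[0] = {3,7} and is[1] = {5} packs as {2, 2, 1, 3, 7, 5}. */
#if 0
static void PackISBuffer(PetscInt is_max,const PetscInt *sizes,const PetscInt *const *indices,PetscInt *buf)
{
  PetscInt i,j,pos;
  buf[0] = is_max;                                         /* data[0]: number of index sets       */
  for (i=0; i<is_max; i++) buf[1+i] = sizes[i];            /* data[1..is_max]: size of each is[i] */
  pos = 1 + is_max;                                        /* payload starts after the header     */
  for (i=0; i<is_max; i++) {
    for (j=0; j<sizes[i]; j++) buf[pos++] = indices[i][j]; /* data(is[i]) stored back to back     */
  }
}
#endif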
167: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
168: {
169:   Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
171:   PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
172:   const PetscInt *idx_i;
173:   PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i;
174:   PetscInt       Mbs,i,j,k,*odata1,*odata2;
175:   PetscInt       proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
176:   PetscInt       proc_end=0,len_unused,nodata2;
177:   PetscInt       ois_max; /* max no. of is[] over all processors */
178:   char           *t_p;
179:   MPI_Comm       comm;
180:   MPI_Request    *s_waits1,*s_waits2,r_req;
181:   MPI_Status     *s_status,r_status;
182:   PetscBT        *table;  /* mark indices of this processor's is[] */
183:   PetscBT        table_i;
184:   PetscBT        otable; /* mark indices of other processors' is[] */
185:   PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
186:   IS             garray_local,garray_gl;

189:   PetscObjectGetComm((PetscObject)C,&comm);
190:   size = c->size;
191:   rank = c->rank;
192:   Mbs  = c->Mbs;

194:   PetscObjectGetNewTag((PetscObject)C,&tag1);
195:   PetscObjectGetNewTag((PetscObject)C,&tag2);

197:   /* create tables used in
198:      step 1: table[i] - mark c->garray of proc [i]
199:      step 3: table[i] - mark indices of is[i] when whose=MINE
200:              table[0] - mark indices of is[] when whose=OTHER */
201:   len  = PetscMax(is_max, size);
202:   PetscMalloc2(len,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,&t_p);
203:   for (i=0; i<len; i++) {
204:     table[i] = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
205:   }
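  /* e.g. with Mbs = 100 and PETSC_BITS_PER_BYTE = 8, each bit table occupies 100/8+1 = 13 bytes,
     so table[i] points 13*i bytes into the single contiguous buffer t_p. */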

207:   MPIU_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);

209:   /* 1. Send this processor's is[] to other processors */
210:   /*---------------------------------------------------*/
211:   /* allocate spaces */
212:   PetscMalloc1(is_max,&n);
213:   len  = 0;
214:   for (i=0; i<is_max; i++) {
215:     ISGetLocalSize(is[i],&n[i]);
216:     len += n[i];
217:   }
218:   if (!len) {
219:     is_max = 0;
220:   } else {
221:     len += 1 + is_max; /* max length of data1 for one processor */
222:   }


225:   PetscMalloc1(size*len+1,&data1);
226:   PetscMalloc1(size,&data1_start);
227:   for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

229:   PetscMalloc4(size,&len_s,size,&btable,size,&iwork,size+1,&Bowners);

231:   /* gather c->garray from all processors */
232:   ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);
233:   ISAllGather(garray_local, &garray_gl);
234:   ISDestroy(&garray_local);
235:   MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);

237:   Bowners[0] = 0;
238:   for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
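  /* e.g. with size = 3 and per-process Bnbs = {4,2,5}, the Allgather above fills Bowners with
     {0,4,2,5} and the prefix sum turns it into {0,4,6,11}, so process i's off-diagonal block
     columns occupy garray_gl[Bowners[i]..Bowners[i+1]-1]. */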

240:   if (is_max) {
241:     /* hash table ctable which maps c->row to proc_id */
242:     PetscMalloc1(Mbs,&ctable);
243:     for (proc_id=0,j=0; proc_id<size; proc_id++) {
244:       for (; j<C->rmap->range[proc_id+1]/bs; j++) ctable[j] = proc_id;
245:     }

247:     /* hash tables marking c->garray */
248:     ISGetIndices(garray_gl,&idx_i);
249:     for (i=0; i<size; i++) {
250:       table_i = table[i];
251:       PetscBTMemzero(Mbs,table_i);
252:       for (j = Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i]*/
253:         PetscBTSet(table_i,idx_i[j]);
254:       }
255:     }
256:     ISRestoreIndices(garray_gl,&idx_i);
257:   }  /* if (is_max) */
258:   ISDestroy(&garray_gl);

260:   /* evaluate communication - mesg to who, length, and buffer space */
261:   for (i=0; i<size; i++) len_s[i] = 0;

263:   /* header of data1 */
264:   for (proc_id=0; proc_id<size; proc_id++) {
265:     iwork[proc_id]        = 0;
266:     *data1_start[proc_id] = is_max;
267:     data1_start[proc_id]++;
268:     for (j=0; j<is_max; j++) {
269:       if (proc_id == rank) {
270:         *data1_start[proc_id] = n[j];
271:       } else {
272:         *data1_start[proc_id] = 0;
273:       }
274:       data1_start[proc_id]++;
275:     }
276:   }

278:   for (i=0; i<is_max; i++) {
279:     ISGetIndices(is[i],&idx_i);
280:     for (j=0; j<n[i]; j++) {
281:       idx                = idx_i[j];
282:       *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
283:       proc_end           = ctable[idx];
284:       for (proc_id=0; proc_id<=proc_end; proc_id++) {  /* for others to process */
285:         if (proc_id == rank) continue; /* done before this loop */
286:         if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need for sending idx to [proc_id] */
287:         *data1_start[proc_id] = idx; data1_start[proc_id]++;
288:         len_s[proc_id]++;
289:       }
290:     }
291:     /* update header data */
292:     for (proc_id=0; proc_id<size; proc_id++) {
293:       if (proc_id== rank) continue;
294:       *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
295:       iwork[proc_id]                 = len_s[proc_id];
296:     }
297:     ISRestoreIndices(is[i],&idx_i);
298:   }

300:   nrqs = 0; nrqr = 0;
301:   for (i=0; i<size; i++) {
302:     data1_start[i] = data1 + i*len;
303:     if (len_s[i]) {
304:       nrqs++;
305:       len_s[i] += 1 + is_max; /* add no. of header msg */
306:     }
307:   }

309:   for (i=0; i<is_max; i++) {
310:     ISDestroy(&is[i]);
311:   }
312:   PetscFree(n);
313:   PetscFree(ctable);

315:   /* Determine the number of messages to expect, their lengths, and the from-ids */
316:   PetscGatherNumberOfMessages(comm,NULL,len_s,&nrqr);
317:   PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);

319:   /*  Now  post the sends */
320:   PetscMalloc2(size,&s_waits1,size,&s_waits2);
321:   k    = 0;
322:   for (proc_id=0; proc_id<size; proc_id++) {  /* send data1 to processor [proc_id] */
323:     if (len_s[proc_id]) {
324:       MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);
325:       k++;
326:     }
327:   }

329:   /* 2. Receive other's is[] and process. Then send back */
330:   /*-----------------------------------------------------*/
331:   len = 0;
332:   for (i=0; i<nrqr; i++) {
333:     if (len_r1[i] > len) len = len_r1[i];
334:   }
335:   PetscFree(len_r1);
336:   PetscFree(id_r1);

338:   for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

340:   PetscMalloc1(len+1,&odata1);
341:   PetscMalloc1(size,&odata2_ptr);
342:   PetscBTCreate(Mbs,&otable);

344:   len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
345:   len_est = 2*len_max;       /* estimated space of storing is[] for all receiving messages */
346:   PetscMalloc1(len_est+1,&odata2);
347:   nodata2 = 0;               /* nodata2+1: number of buffers allocated and tracked in odata2_ptr[] */

349:   odata2_ptr[nodata2] = odata2;

351:   len_unused = len_est; /* unused space in the array odata2_ptr[nodata2]; must stay >= len_max */

353:   k = 0;
354:   while (k < nrqr) {
355:     /* Receive messages */
356:     MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
357:     if (flag) {
358:       MPI_Get_count(&r_status,MPIU_INT,&len);
359:       proc_id = r_status.MPI_SOURCE;
360:       MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
361:       MPI_Wait(&r_req,&r_status);

363:       /*  Process messages */
364:       /*  make sure there is enough unused space in odata2 array */
365:       if (len_unused < len_max) { /* allocate more space for odata2 */
366:         PetscMalloc1(len_est+1,&odata2);

368:         odata2_ptr[++nodata2] = odata2;

370:         len_unused = len_est;
371:       }

373:       MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
374:       len  = 1 + odata2[0];
375:       for (i=0; i<odata2[0]; i++) len += odata2[1 + i];

377:       /* Send messages back */
378:       MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
379:       k++;
380:       odata2        += len;
381:       len_unused    -= len;
382:       len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
383:     }
384:   }
385:   PetscFree(odata1);
386:   PetscBTDestroy(&otable);

388:   /* 3. Do local work on this processor's is[] */
389:   /*-------------------------------------------*/
390:   /* make sure there is enough unused space in odata2(=data) array */
391:   len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
392:   if (len_unused < len_max) { /* allocate more space for odata2 */
393:     PetscMalloc1(len_est+1,&odata2);

395:     odata2_ptr[++nodata2] = odata2;
396:   }

398:   data = odata2;
399:   MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
400:   PetscFree(data1_start);

402:   /* 4. Receive work done on other processors, then merge */
403:   /*------------------------------------------------------*/
404:   /* get the max length of the messages that this processor expects to recv */
405:   MPIU_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);
406:   PetscMalloc1(iwork[rank]+1,&data2);
407:   PetscFree4(len_s,btable,iwork,Bowners);

409:   k = 0;
410:   while (k < nrqs) {
411:     /* Receive messages */
412:     MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
413:     if (flag) {
414:       MPI_Get_count(&r_status,MPIU_INT,&len);

416:       proc_id = r_status.MPI_SOURCE;

418:       MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
419:       MPI_Wait(&r_req,&r_status);
420:       if (len > 1+is_max) { /* Add data2 into data */
421:         data2_i = data2 + 1 + is_max;
422:         for (i=0; i<is_max; i++) {
423:           table_i = table[i];
424:           data_i  = data + 1 + is_max + Mbs*i;
425:           isz     = data[1+i];
426:           for (j=0; j<data2[1+i]; j++) {
427:             col = data2_i[j];
428:             if (!PetscBTLookupSet(table_i,col)) data_i[isz++] = col;
429:           }
430:           data[1+i] = isz;
431:           if (i < is_max - 1) data2_i += data2[1+i];
432:         }
433:       }
434:       k++;
435:     }
436:   }
437:   PetscFree(data2);
438:   PetscFree2(table,t_p);

440:   /* phase 1 sends are complete */
441:   PetscMalloc1(size,&s_status);
442:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}
443:   PetscFree(data1);

445:   /* phase 2 sends are complete */
446:   if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status);}
447:   PetscFree2(s_waits1,s_waits2);
448:   PetscFree(s_status);

450:   /* 5. Create new is[] */
451:   /*--------------------*/
452:   for (i=0; i<is_max; i++) {
453:     data_i = data + 1 + is_max + Mbs*i;
454:     ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);
455:   }
456:   for (k=0; k<=nodata2; k++) {
457:     PetscFree(odata2_ptr[k]);
458:   }
459:   PetscFree(odata2_ptr);
460:   return(0);
461: }

465: /*
466:    MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
467:        the work on the local processor.

469:      Inputs:
470:       C      - MAT_MPISBAIJ;
471:       data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
472:       whose  - whose is[] to be processed,
473:                MINE:  this processor's is[]
474:                OTHER: other processor's is[]
475:      Output:
476:        nidx  - whose = MINE:
477:                      holds input and newly found indices in the same format as data
478:                whose = OTHER:
479:                      only holds the newly found indices
480:        table - table[i]: mark the indices of is[i], i=0,...,is_max. Used only in the case 'whose=MINE'.
481: */
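/* Illustrative layout (an example, not part of the original source): with is_max = 2 and Mbs = 10,
   whose = MINE : nidx[0] = 2, nidx[1..2] hold the new sizes, indices of is[0] start at nidx[3]
                  and indices of is[1] start at nidx[3+10] = nidx[13];
   whose = OTHER: the newly found indices of is[0] start at nidx[3] and those of is[1] follow
                  immediately after them. */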
482: /* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
483: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
484: {
485:   Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
486:   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
487:   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
489:   PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
490:   PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
491:   PetscBT        table0;  /* mark the indices of input is[] for look up */
492:   PetscBT        table_i; /* points to i-th table. When whose=OTHER, a single table is used for all is[] */

495:   Mbs    = c->Mbs; mbs = a->mbs;
496:   ai     = a->i; aj = a->j;
497:   bi     = b->i; bj = b->j;
498:   garray = c->garray;
499:   rstart = c->rstartbs;
500:   is_max = data[0];

502:   PetscBTCreate(Mbs,&table0);

504:   nidx[0] = is_max;
505:   idx_i   = data + is_max + 1; /* ptr to input is[0] array */
506:   nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
507:   for (i=0; i<is_max; i++) { /* for each is */
508:     isz = 0;
509:     n   = data[1+i]; /* size of input is[i] */

511:     /* initialize and set table_i(mark idx and nidx) and table0(only mark idx) */
512:     if (whose == MINE) { /* process this processor's is[] */
513:       table_i = table[i];
514:       nidx_i  = nidx + 1+ is_max + Mbs*i;
515:     } else {            /* process other processor's is[] - only use one temp table */
516:       table_i = table[0];
517:     }
518:     PetscBTMemzero(Mbs,table_i);
519:     PetscBTMemzero(Mbs,table0);
520:     if (n==0) {
521:       nidx[1+i] = 0;  /* size of new is[i] */
522:       continue;
523:     }

525:     isz0 = 0; col_max = 0;
526:     for (j=0; j<n; j++) {
527:       col = idx_i[j];
528:       if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
529:       if (!PetscBTLookupSet(table_i,col)) {
530:         PetscBTSet(table0,col);
531:         if (whose == MINE) nidx_i[isz0] = col;
532:         if (col_max < col) col_max = col;
533:         isz0++;
534:       }
535:     }

537:     if (whose == MINE) isz = isz0;
538:     k = 0;  /* no. of indices from input is[i] that have been examined */
539:     for (row=0; row<mbs; row++) {
540:       a_start = ai[row]; a_end = ai[row+1];
541:       b_start = bi[row]; b_end = bi[row+1];
542:       if (PetscBTLookup(table0,row+rstart)) { /* row is on input is[i]:
543:                                                 do row search: collect all col in this row */
544:         for (l = a_start; l<a_end ; l++) { /* Amat */
545:           col = aj[l] + rstart;
546:           if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
547:         }
548:         for (l = b_start; l<b_end ; l++) { /* Bmat */
549:           col = garray[bj[l]];
550:           if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
551:         }
552:         k++;
553:         if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
554:       } else { /* row is not on input is[i]:
555:                   do col search: add row onto nidx_i if this row has a col in the input is[i] */
556:         for (l = a_start; l<a_end; l++) {  /* Amat */
557:           col = aj[l] + rstart;
558:           if (col > col_max) break;
559:           if (PetscBTLookup(table0,col)) {
560:             if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
561:             break; /* for l = start; l<end ; l++) */
562:           }
563:         }
564:         for (l = b_start; l<b_end; l++) {  /* Bmat */
565:           col = garray[bj[l]];
566:           if (col > col_max) break;
567:           if (PetscBTLookup(table0,col)) {
568:             if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
569:             break; /* for l = start; l<end ; l++) */
570:           }
571:         }
572:       }
573:     }

575:     if (i < is_max - 1) {
576:       idx_i  += n;   /* ptr to input is[i+1] array */
577:       nidx_i += isz; /* ptr to output is[i+1] array */
578:     }
579:     nidx[1+i] = isz; /* size of new is[i] */
580:   } /* for each is */
581:   PetscBTDestroy(&table0);
582:   return(0);
583: }