Actual source code: sbaijov.c

petsc-3.3-p7 2013-05-11
  2: /*
  3:    Routines to compute overlapping regions of a parallel MPI matrix.
  4:    Used for finding submatrices that are shared across processors.
  5: */
  6: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h> 
  7: #include <petscbt.h>

  9: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
 10: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);
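
These two static routines back the public MatIncreaseOverlap() entry point when the matrix type is MATMPISBAIJ. A minimal calling sketch, assuming an already assembled MATMPISBAIJ matrix C; the seed rows are illustrative and error checking is omitted:

    IS       is[1];
    PetscInt rows[2] = {0,1};                        /* illustrative seed rows                   */

    ISCreateGeneral(PETSC_COMM_SELF,2,rows,PETSC_COPY_VALUES,&is[0]);
    MatIncreaseOverlap(C,1,is,1);                    /* replaces is[0] by its 1-level overlap    */
    ISDestroy(&is[0]);
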

 14: PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
 15: {
 17:   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
 18:   IS             *is_new,*is_row;
 19:   Mat            *submats;
 20:   Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
 21:   Mat_SeqSBAIJ   *asub_i;
 22:   PetscBT        table;
 23:   PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
 24:   const PetscInt *idx;
 25:   PetscBool      flg,*allcolumns,*allrows;

 28:   PetscMalloc(is_max*sizeof(IS),&is_new);
 29:   /* Convert the indices into block format */
 30:   ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);
 31:   if (ov < 0){ SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");}
 32: 
 33:   /* ----- previous non-scalable implementation ----- */
 34:   flg=PETSC_FALSE;
 35:   PetscOptionsHasName(PETSC_NULL, "-IncreaseOverlap_old", &flg);
 36:   if (flg){ /* previous non-scalable implementation */
 37:     printf("use previous non-scalable implementation...\n");
 38:     for (i=0; i<ov; ++i) {
 39:       MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
 40:     }
 41:   } else { /* implementation using modified BAIJ routines */
 42: 
 43:   PetscMalloc((Mbs+1)*sizeof(PetscInt),&nidx);
 44:   PetscBTCreate(Mbs,&table); /* for column search */
 45:   PetscMalloc2(is_max+1,PetscBool,&allcolumns,is_max+1,PetscBool,&allrows);

 47:   /* Create is_row */
 48:   PetscMalloc(is_max*sizeof(IS),&is_row);
 49:   ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);
 50:   allrows[0] = PETSC_TRUE;
 51:   for (i=1; i<is_max; i++) {
 52:     is_row[i]  = is_row[0]; /* reuse is_row[0] */
 53:     allrows[i] = PETSC_TRUE;
 54:   }
 55: 
 56:   /* Allocate memory to hold all the submatrices - Modified from MatGetSubMatrices_MPIBAIJ() */
 57:   PetscMalloc((is_max+1)*sizeof(Mat),&submats);

 59:   /* Check for special case: each processor gets entire matrix columns */
 60:   for (i=0; i<is_max; i++) {
 61:     ISIdentity(is_new[i],&flg);
 62:     ISGetLocalSize(is_new[i],&isz);
 63:     if (flg && isz == Mbs){
 64:       allcolumns[i] = PETSC_TRUE;
 65:     } else {
 66:       allcolumns[i] = PETSC_FALSE;
 67:     }
 68:   }

 70:   /* Determine the number of stages in which the submatrices are extracted (bounds per-stage memory) */
 71:   nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
 72:   if (!nmax) nmax = 1;
 73:   nstages_local = is_max/nmax + ((is_max % nmax)?1:0);
 74: 
 75:   /* Make sure every processor loops through the nstages */
 76:   MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);
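
The bound above limits each stage to roughly 20 MB worth of Nbs-sized integer arrays (c->Nbs entries per index set). A worked example with illustrative numbers, showing how the stage count falls out:

    PetscInt Nbs    = 1000000;                                        /* illustrative global block columns    */
    PetscInt is_max = 12;                                             /* illustrative number of index sets    */
    PetscInt nmax   = 20*1000000/(Nbs*(PetscInt)sizeof(PetscInt));    /* = 5 with 4-byte PetscInt             */
    PetscInt nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0); /* = 12/5 + 1 = 3 stages on this rank   */
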
 77: 
 78:   for (iov=0; iov<ov; ++iov) {
 79:     /* 1) Get submats for column search */
 80:     for (i=0,pos=0; i<nstages; i++) {
 81:       if (pos+nmax <= is_max) max_no = nmax;
 82:       else if (pos == is_max) max_no = 0;
 83:       else                   max_no = is_max-pos;
 84:       c->ijonly = PETSC_TRUE;
 85:       MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,allrows+pos,allcolumns+pos,submats+pos);
 86:       pos += max_no;
 87:     }
 88: 
 89:     /* 2) Row search */
 90:     MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);
 91: 
 92:     /* 3) Column search */
 93:     for (i=0; i<is_max; i++){
 94:       asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
 95:       ai = asub_i->i;
 96: 
 97:       /* enter is_new obtained from MatIncreaseOverlap_MPIBAIJ_Once() into table */
 98:       PetscBTMemzero(Mbs,table);
 99: 
100:       ISGetIndices(is_new[i],&idx);
101:       ISGetLocalSize(is_new[i],&nis);
102:       for (l=0; l<nis; l++) {
103:         PetscBTSet(table,idx[l]);
104:         nidx[l] = idx[l];
105:       }
106:       isz = nis;

108:       /* add column entries to table */
109:       for (brow=0; brow<Mbs; brow++){
110:         nz = ai[brow+1] - ai[brow];
111:         if (nz) {
112:           if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
113:         }
114:       }
115:       ISRestoreIndices(is_new[i],&idx);
116:       ISDestroy(&is_new[i]);

118:       /* create updated is_new */
119:       ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);
120:     }

122:     /* Free tmp spaces */
123:     for (i=0; i<is_max; i++){
124:       MatDestroy(&submats[i]);
125:     }
126:   }
127:   PetscFree2(allcolumns,allrows);
128:   PetscBTDestroy(&table);
129:   PetscFree(submats);
130:   ISDestroy(&is_row[0]);
131:   PetscFree(is_row);
132:   PetscFree(nidx);

134:   }

136:   for (i=0; i<is_max; i++) {ISDestroy(&is[i]);}
137:   ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);

139:   for (i=0; i<is_max; i++) {ISDestroy(&is_new[i]);}
140:   PetscFree(is_new);
141:   return(0);
142: }
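
The column search in step 3 above relies on the PetscBT bit-table idiom used throughout this file: PetscBTLookupSet() reports whether a bit was already set and then sets it, so an index is appended to the output list only the first time it is seen. A self-contained sketch of that idiom in isolation (error codes ignored):

    PetscBT  seen;
    PetscInt vals[5] = {2,7,2,5,7},uniq[5],nuniq = 0,i;

    PetscBTCreate(10,&seen);                       /* bit table over indices 0..9        */
    for (i=0; i<5; i++) {
      if (!PetscBTLookupSet(seen,vals[i])) {       /* 0 means first occurrence           */
        uniq[nuniq++] = vals[i];                   /* uniq ends up as {2,7,5}            */
      }
    }
    PetscBTDestroy(&seen);
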

144: typedef enum {MINE,OTHER} WhoseOwner;
145: /*  data1, odata1 and odata2 are packed in the format (for communication):
146:        data[0]          = is_max, no of is 
147:        data[1]          = size of is[0]
148:         ...
149:        data[is_max]     = size of is[is_max-1]
150:        data[is_max + 1] = data(is[0])
151:         ...
152:        data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
153:         ...
154:    data2 is packed in the format (for creating output is[]):
155:        data[0]          = is_max, no of is 
156:        data[1]          = size of is[0]
157:         ...
158:        data[is_max]     = size of is[is_max-1]
159:        data[is_max + 1] = data(is[0])
160:         ...
161:        data[is_max + 1 + Mbs*i] = data(is[i])
162:         ...
163: */
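
To make the layout concrete, here is a hypothetical helper (not part of PETSc; name and body are illustrative only) that walks a buffer packed in the data1/odata1 format. The data2 format differs only in that the indices of is[i] always start at the fixed offset is_max+1+Mbs*i:

    static void WalkPackedIS(const PetscInt *data)
    {
      PetscInt       nis  = data[0];               /* data[0]   = number of index sets      */
      const PetscInt *idx = data + nis + 1;        /* indices of is[0] follow the header    */
      PetscInt       i,j;

      for (i=0; i<nis; i++) {
        PetscInt len = data[1+i];                  /* data[1+i] = size of is[i]             */
        for (j=0; j<len; j++) {
          /* idx[j] is the j-th block index of is[i] */
        }
        idx += len;                                /* is[i+1] is stored right after is[i]   */
      }
    }
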
166: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
167: {
168:   Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
170:   PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
171:   const PetscInt *idx_i;
172:   PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
173:                  Mbs,i,j,k,*odata1,*odata2,
174:                  proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
175:   PetscInt       proc_end=0,*iwork,len_unused,nodata2;
175:   PetscInt       ois_max; /* max no. of is[] over all processors */
177:   char           *t_p;
178:   MPI_Comm       comm;
179:   MPI_Request    *s_waits1,*s_waits2,r_req;
180:   MPI_Status     *s_status,r_status;
181:   PetscBT        *table;  /* mark indices of this processor's is[] */
182:   PetscBT        table_i;
183:   PetscBT        otable; /* mark indices of other processors' is[] */
184:   PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
185:   IS             garray_local,garray_gl;

188:   comm = ((PetscObject)C)->comm;
189:   size = c->size;
190:   rank = c->rank;
191:   Mbs  = c->Mbs;

193:   PetscObjectGetNewTag((PetscObject)C,&tag1);
194:   PetscObjectGetNewTag((PetscObject)C,&tag2);

196:   /* create tables used in
197:      step 1: table[i] - mark c->garray of proc [i]
198:      step 3: table[i] - mark indices of is[i] when whose=MINE     
199:              table[0] - mark indices of is[] when whose=OTHER */
200:   len = PetscMax(is_max, size);
201:   PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);
202:   for (i=0; i<len; i++) {
203:     table[i]  = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
204:   }

206:   MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);
207: 
208:   /* 1. Send this processor's is[] to other processors */
209:   /*---------------------------------------------------*/
210:   /* allocate spaces */
211:   PetscMalloc(is_max*sizeof(PetscInt),&n);
212:   len = 0;
213:   for (i=0; i<is_max; i++) {
214:     ISGetLocalSize(is[i],&n[i]);
215:     len += n[i];
216:   }
217:   if (!len) {
218:     is_max = 0;
219:   } else {
220:     len += 1 + is_max; /* max length of data1 for one processor */
221:   }

223: 
224:   PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);
225:   PetscMalloc(size*sizeof(PetscInt*),&data1_start);
226:   for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

228:   PetscMalloc4(size,PetscInt,&len_s,size,PetscInt,&btable,size,PetscInt,&iwork,size+1,PetscInt,&Bowners);

230:   /* gather c->garray from all processors */
231:   ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);
232:   ISAllGather(garray_local, &garray_gl);
233:   ISDestroy(&garray_local);
234:   MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);
235:   Bowners[0] = 0;
236:   for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
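
Bowners is the usual counts-to-offsets construction: each rank contributes Bnbs, and the exclusive prefix sum over the gathered counts gives each rank's range inside the concatenated garray_gl. A small sketch with made-up counts:

    PetscInt counts[3]  = {4,2,3};                 /* made-up Bnbs on ranks 0,1,2             */
    PetscInt offsets[4] = {0},i;

    for (i=0; i<3; i++) offsets[i+1] = offsets[i] + counts[i];
    /* offsets = {0,4,6,9}: rank i's columns sit in garray_gl[offsets[i]..offsets[i+1]-1] */
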
237: 
238:   if (is_max){
239:     /* hash table ctable which maps c->row to proc_id */
240:     PetscMalloc(Mbs*sizeof(PetscInt),&ctable);
241:     for (proc_id=0,j=0; proc_id<size; proc_id++) {
242:       for (; j<C->rmap->range[proc_id+1]/bs; j++) {
243:         ctable[j] = proc_id;
244:       }
245:     }

247:     /* hash tables marking c->garray */
248:     ISGetIndices(garray_gl,&idx_i);
249:     for (i=0; i<size; i++){
250:       table_i = table[i];
251:       PetscBTMemzero(Mbs,table_i);
252:       for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i]*/
253:         PetscBTSet(table_i,idx_i[j]);
254:       }
255:     }
256:     ISRestoreIndices(garray_gl,&idx_i);
257:   }  /* if (is_max) */
258:   ISDestroy(&garray_gl);

260:   /* evaluate communication - which processes to message, message lengths, and buffer space */
261:   for (i=0; i<size; i++) len_s[i] = 0;
262: 
263:   /* header of data1 */
264:   for (proc_id=0; proc_id<size; proc_id++){
265:     iwork[proc_id] = 0;
266:     *data1_start[proc_id] = is_max;
267:     data1_start[proc_id]++;
268:     for (j=0; j<is_max; j++) {
269:       if (proc_id == rank){
270:         *data1_start[proc_id] = n[j];
271:       } else {
272:         *data1_start[proc_id] = 0;
273:       }
274:       data1_start[proc_id]++;
275:     }
276:   }
277: 
278:   for (i=0; i<is_max; i++) {
279:     ISGetIndices(is[i],&idx_i);
280:     for (j=0; j<n[i]; j++){
281:       idx = idx_i[j];
282:       *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
283:       proc_end = ctable[idx];
284:       for (proc_id=0;  proc_id<=proc_end; proc_id++){ /* for others to process */
285:         if (proc_id == rank ) continue; /* done before this loop */
286:         if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
287:           continue;   /* no need for sending idx to [proc_id] */
288:         *data1_start[proc_id] = idx; data1_start[proc_id]++;
289:         len_s[proc_id]++;
290:       }
291:     }
292:     /* update header data */
293:     for (proc_id=0; proc_id<size; proc_id++){
294:       if (proc_id== rank) continue;
295:       *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
296:       iwork[proc_id] = len_s[proc_id] ;
297:     }
298:     ISRestoreIndices(is[i],&idx_i);
299:   }

301:   nrqs = 0; nrqr = 0;
302:   for (i=0; i<size; i++){
303:     data1_start[i] = data1 + i*len;
304:     if (len_s[i]){
305:       nrqs++;
306:       len_s[i] += 1 + is_max; /* add no. of header msg */
307:     }
308:   }

310:   for (i=0; i<is_max; i++) {
311:     ISDestroy(&is[i]);
312:   }
313:   PetscFree(n);
314:   PetscFree(ctable);

316:   /* Determine the number of messages to expect, their lengths, and the from-ids */
317:   PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);
318:   PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);
319: 
320:   /*  Now  post the sends */
321:   PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);
322:   k = 0;
323:   for (proc_id=0; proc_id<size; proc_id++){  /* send data1 to processor [proc_id] */
324:     if (len_s[proc_id]){
325:       MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);
326:       k++;
327:     }
328:   }

330:   /* 2. Receive other processors' is[] and process them, then send results back */
331:   /*-----------------------------------------------------*/
332:   len = 0;
333:   for (i=0; i<nrqr; i++){
334:     if (len_r1[i] > len) len = len_r1[i];
335:   }
336:   PetscFree(len_r1);
337:   PetscFree(id_r1);

339:   for (proc_id=0; proc_id<size; proc_id++)
340:     len_s[proc_id] = iwork[proc_id] = 0;
341: 
342:   PetscMalloc((len+1)*sizeof(PetscInt),&odata1);
343:   PetscMalloc(size*sizeof(PetscInt*),&odata2_ptr);
344:   PetscBTCreate(Mbs,&otable);

346:   len_max = ois_max*(Mbs+1);  /* max space storing all is[] for each receive */
347:   len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
348:   PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
349:   nodata2 = 0;       /* nodata2+1 = number of buffers allocated so far for odata2_ptr[] */
350:   odata2_ptr[nodata2] = odata2;
351:   len_unused = len_est; /* unused space in the buffer odata2_ptr[nodata2]; must stay >= len_max */
352: 
353:   k = 0;
354:   while (k < nrqr){
355:     /* Receive messages */
356:     MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
357:     if (flag){
358:       MPI_Get_count(&r_status,MPIU_INT,&len);
359:       proc_id = r_status.MPI_SOURCE;
360:       MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
361:       MPI_Wait(&r_req,&r_status);

363:       /*  Process messages */
364:       /*  make sure there is enough unused space in odata2 array */
365:       if (len_unused < len_max){ /* allocate more space for odata2 */
366:         PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
367:         odata2_ptr[++nodata2] = odata2;
368:         len_unused = len_est;
369:       }

371:       MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
372:       len = 1 + odata2[0];
373:       for (i=0; i<odata2[0]; i++){
374:         len += odata2[1 + i];
375:       }

377:       /* Send messages back */
378:       MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
379:       k++;
380:       odata2     += len;
381:       len_unused -= len;
382:       len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
383:     }
384:   }
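
The loop above is the standard probe-then-receive pattern for messages whose length is not known in advance: MPI_Iprobe fills a status, MPI_Get_count extracts the length, and only then is the receive posted. A stripped-down sketch, assuming comm, tag, and a sufficiently large buffer buf already exist:

    MPI_Status st;
    int        flag,count;

    MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&st);
    if (flag) {                                          /* a matching message is pending      */
      MPI_Get_count(&st,MPIU_INT,&count);                /* its length, counted in PetscInts   */
      MPI_Recv(buf,count,MPIU_INT,st.MPI_SOURCE,st.MPI_TAG,comm,MPI_STATUS_IGNORE);
    }
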
385:   PetscFree(odata1);
386:   PetscBTDestroy(&otable);

388:   /* 3. Do local work on this processor's is[] */
389:   /*-------------------------------------------*/
390:   /* make sure there is enough unused space in odata2(=data) array */
391:   len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
392:   if (len_unused < len_max){ /* allocate more space for odata2 */
393:     PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
394:     odata2_ptr[++nodata2] = odata2;
395:     len_unused = len_est;
396:   }

398:   data = odata2;
399:   MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
400:   PetscFree(data1_start);

402:   /* 4. Receive work done on other processors, then merge */
403:   /*------------------------------------------------------*/
404:   /* get max length of the messages this processor expects to recv */
405:   MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);
406:   PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);
407:   PetscFree4(len_s,btable,iwork,Bowners);

409:   k = 0;
410:   while (k < nrqs){
411:     /* Receive messages */
412:     MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
413:     if (flag){
414:       MPI_Get_count(&r_status,MPIU_INT,&len);
415:       proc_id = r_status.MPI_SOURCE;
416:       MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
417:       MPI_Wait(&r_req,&r_status);
418:       if (len > 1+is_max){ /* Add data2 into data */
419:         data2_i = data2 + 1 + is_max;
420:         for (i=0; i<is_max; i++){
421:           table_i = table[i];
422:           data_i  = data + 1 + is_max + Mbs*i;
423:           isz     = data[1+i];
424:           for (j=0; j<data2[1+i]; j++){
425:             col = data2_i[j];
426:             if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
427:           }
428:           data[1+i] = isz;
429:           if (i < is_max - 1) data2_i += data2[1+i];
430:         }
431:       }
432:       k++;
433:     }
434:   }
435:   PetscFree(data2);
436:   PetscFree2(table,t_p);

438:   /* wait until phase 1 sends are complete, then free the send buffer */
439:   PetscMalloc(size*sizeof(MPI_Status),&s_status);
440:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}
441:   PetscFree(data1);
442: 
443:   /* wait until phase 2 sends are complete */
444:   if (nrqr){MPI_Waitall(nrqr,s_waits2,s_status);}
445:   PetscFree2(s_waits1,s_waits2);
446:   PetscFree(s_status);

448:   /* 5. Create new is[] */
449:   /*--------------------*/
450:   for (i=0; i<is_max; i++) {
451:     data_i = data + 1 + is_max + Mbs*i;
452:     ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);
453:   }
454:   for (k=0; k<=nodata2; k++){
455:     PetscFree(odata2_ptr[k]);
456:   }
457:   PetscFree(odata2_ptr);

459:   return(0);
460: }
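
The message counts and lengths in step 1 above are obtained with PETSc's communication-setup helpers: PetscGatherNumberOfMessages() tells each rank how many messages to expect, and PetscGatherMessageLengths() returns the source ranks and lengths. A minimal sketch, assuming comm and size exist and lens[p] holds the number of PetscInts this rank will send to rank p (0 meaning no message):

    PetscMPIInt *onodes,*olengths,nrecvs,nsends = 0,p;

    for (p=0; p<size; p++) if (lens[p]) nsends++;
    PetscGatherNumberOfMessages(comm,PETSC_NULL,lens,&nrecvs);
    PetscGatherMessageLengths(comm,nsends,nrecvs,lens,&onodes,&olengths);
    /* onodes[i], olengths[i]: source rank and length of the i-th expected message */
    PetscFree(onodes);
    PetscFree(olengths);
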

464: /*  
465:    MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once() to do
466:        the work on the local processor.

468:      Inputs:
469:       C      - the MATMPISBAIJ matrix;
470:       data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
471:       whose  - whose is[] to be processed, 
472:                MINE:  this processor's is[]
473:                OTHER: other processor's is[]
474:      Output:  
475:        nidx  - whose = MINE:
476:                      holds input and newly found indices in the same format as data
477:                whose = OTHER:
478:                      only holds the newly found indices
479:        table - table[i]: marks the indices of is[i], i=0,...,is_max-1. Used only in the case whose=MINE.
480: */
481: /* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
482: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
483: {
484:   Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
485:   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
486:   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
488:   PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
489:   PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
490:   PetscBT        table0;  /* mark the indices of input is[] for look up */
491:   PetscBT        table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */
492: 
494:   Mbs = c->Mbs; mbs = a->mbs;
495:   ai = a->i; aj = a->j;
496:   bi = b->i; bj = b->j;
497:   garray = c->garray;
498:   rstart = c->rstartbs;
499:   is_max = data[0];

501:   PetscBTCreate(Mbs,&table0);
502: 
503:   nidx[0] = is_max;
504:   idx_i   = data + is_max + 1; /* ptr to input is[0] array */
505:   nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
506:   for (i=0; i<is_max; i++) { /* for each is */
507:     isz  = 0;
508:     n = data[1+i]; /* size of input is[i] */

510:     /* initialize and set table_i(mark idx and nidx) and table0(only mark idx) */
511:     if (whose == MINE){ /* process this processor's is[] */
512:       table_i = table[i];
513:       nidx_i  = nidx + 1+ is_max + Mbs*i;
514:     } else {            /* process other processor's is[] - only use one temp table */
515:       table_i = table[0];
516:     }
517:     PetscBTMemzero(Mbs,table_i);
518:     PetscBTMemzero(Mbs,table0);
519:     if (n==0) {
520:        nidx[1+i] = 0; /* size of new is[i] */
521:        continue;
522:     }

524:     isz0 = 0; col_max = 0;
525:     for (j=0; j<n; j++){
526:       col = idx_i[j];
527:       if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
528:       if (!PetscBTLookupSet(table_i,col)) {
529:         PetscBTSet(table0,col);
530:         if (whose == MINE) {nidx_i[isz0] = col;}
531:         if (col_max < col) col_max = col;
532:         isz0++;
533:       }
534:     }
535: 
536:     if (whose == MINE) {isz = isz0;}
537:     k = 0;  /* no. of indices from input is[i] that have been examined */
538:     for (row=0; row<mbs; row++){
539:       a_start = ai[row]; a_end = ai[row+1];
540:       b_start = bi[row]; b_end = bi[row+1];
541:       if (PetscBTLookup(table0,row+rstart)){ /* row is on input is[i]:
542:                                                 do row search: collect all col in this row */
543:         for (l = a_start; l<a_end ; l++){ /* Amat */
544:           col = aj[l] + rstart;
545:           if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
546:         }
547:         for (l = b_start; l<b_end ; l++){ /* Bmat */
548:           col = garray[bj[l]];
549:           if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
550:         }
551:         k++;
552:         if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
553:       } else { /* row is not on input is[i]:
554:                   do col search: add row to nidx_i if any of its stored cols is in input is[i] */
555:         for (l = a_start; l<a_end ; l++){ /* Amat */
556:           col = aj[l] + rstart;
557:           if (col > col_max) break;
558:           if (PetscBTLookup(table0,col)){
559:             if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
560:             break; /* for l = start; l<end ; l++) */
561:           }
562:         }
563:         for (l = b_start; l<b_end ; l++){ /* Bmat */
564:           col = garray[bj[l]];
565:           if (col > col_max) break;
566:           if (PetscBTLookup(table0,col)){
567:             if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
568:             break; /* for l = start; l<end ; l++) */
569:           }
570:         }
571:       }
572:     }
573: 
574:     if (i < is_max - 1){
575:       idx_i  += n;   /* ptr to input is[i+1] array */
576:       nidx_i += isz; /* ptr to output is[i+1] array */
577:     }
578:     nidx[1+i] = isz; /* size of new is[i] */
579:   } /* for each is */
580:   PetscBTDestroy(&table0);
581: 
582:   return(0);
583: }
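
Because SBAIJ stores only the upper triangle, the local routine needs both searches: the row search covers neighbors reachable from a stored row of is[i], while the column search catches rows whose only coupling to is[i] is stored above the diagonal in another row. The end-to-end effect can be checked with a small driver; the sketch below (illustrative values, error checking omitted) builds a tridiagonal MATMPISBAIJ matrix, so one level of overlap should grow the seed set {2} to {1,2,3}:

    #include <petscmat.h>

    int main(int argc,char **argv)
    {
      Mat      A;
      IS       is[1];
      PetscInt i,n = 8,row = 2;

      PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
      MatSetType(A,MATMPISBAIJ);
      MatSetUp(A);
      for (i=0; i<n; i++) {                              /* tridiagonal, upper triangle only */
        MatSetValue(A,i,i,2.0,INSERT_VALUES);
        if (i+1 < n) MatSetValue(A,i,i+1,-1.0,INSERT_VALUES);
      }
      MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
      MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

      ISCreateGeneral(PETSC_COMM_SELF,1,&row,PETSC_COPY_VALUES,&is[0]);
      MatIncreaseOverlap(A,1,is,1);                      /* is[0] should now hold {1,2,3}    */
      ISView(is[0],PETSC_VIEWER_STDOUT_SELF);

      ISDestroy(&is[0]);
      MatDestroy(&A);
      PetscFinalize();
      return 0;
    }
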