Actual source code: aomemscalable.c
petsc-3.4.5 2014-06-29
2: /*
3:    The memory scalable AO application ordering routines. These store only
4:    each process's local portion of the orderings, rather than the entire orderings on every process.
5: */
7: #include <../src/vec/is/ao/aoimpl.h> /*I "petscao.h" I*/
9: typedef struct {
10: PetscInt *app_loc; /* app_loc[i] is the partner for the ith local PETSc slot */
11: PetscInt *petsc_loc; /* petsc_loc[j] is the partner for the jth local app slot */
12: PetscLayout map; /* determines the local sizes of ao */
13: } AO_MemoryScalable;
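/*
   Added illustrative note (not part of the original source), assuming 2 processes with
   2 entries each, application ordering {3,1} on rank 0 and {0,2} on rank 1, and the
   natural PETSc ordering 0,1,2,3: the layout gives rank 0 ownership of global slots
   0,1 and rank 1 ownership of slots 2,3, so the stored arrays are

     rank 0: app_loc   = {3,1}   (PETSc 0 <-> app 3, PETSc 1 <-> app 1)
             petsc_loc = {2,1}   (app 0 <-> PETSc 2, app 1 <-> PETSc 1)
     rank 1: app_loc   = {0,2}   (PETSc 2 <-> app 0, PETSc 3 <-> app 2)
             petsc_loc = {3,0}   (app 2 <-> PETSc 3, app 3 <-> PETSc 0)

   Each process thus stores only its n = N/size share of both maps, which is what makes
   this AO type memory scalable.
*/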
15: /*
16:    The data is distributed across the processes, so process 0 receives each process's portion and prints it all
17: */
20: PetscErrorCode AOView_MemoryScalable(AO ao,PetscViewer viewer)
21: {
22: PetscErrorCode ierr;
23: PetscMPIInt rank,size;
24: AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
25: PetscBool iascii;
26: PetscMPIInt tag_app,tag_petsc;
27: PetscLayout map = aomems->map;
28: PetscInt *app,*app_loc,*petsc,*petsc_loc,len,i,j;
29: MPI_Status status;
32: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
33: if (!iascii) SETERRQ1(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"Viewer type %s not supported for AO MemoryScalable",((PetscObject)viewer)->type_name);
35: MPI_Comm_rank(PetscObjectComm((PetscObject)ao),&rank);
36: MPI_Comm_size(PetscObjectComm((PetscObject)ao),&size);
38: PetscObjectGetNewTag((PetscObject)ao,&tag_app);
39: PetscObjectGetNewTag((PetscObject)ao,&tag_petsc);
41: if (!rank) {
42: PetscViewerASCIIPrintf(viewer,"Number of elements in ordering %D\n",ao->N);
43: PetscViewerASCIIPrintf(viewer, "PETSc->App App->PETSc\n");
45: PetscMalloc2(map->N,PetscInt,&app,map->N,PetscInt,&petsc);
46: len = map->n;
47: /* print local AO */
48: PetscViewerASCIIPrintf(viewer,"Process [%D]\n",rank);
49: for (i=0; i<len; i++) {
50: PetscViewerASCIIPrintf(viewer,"%3D %3D %3D %3D\n",i,aomems->app_loc[i],i,aomems->petsc_loc[i]);
51: }
53:     /* receive and print the other processes' portions of the AO */
54: for (i=1; i<size; i++) {
55: len = map->range[i+1] - map->range[i];
56: app_loc = app + map->range[i];
57:       petsc_loc = petsc + map->range[i];
58: MPI_Recv(app_loc,(PetscMPIInt)len,MPIU_INT,i,tag_app,PetscObjectComm((PetscObject)ao),&status);
59: MPI_Recv(petsc_loc,(PetscMPIInt)len,MPIU_INT,i,tag_petsc,PetscObjectComm((PetscObject)ao),&status);
60: PetscViewerASCIIPrintf(viewer,"Process [%D]\n",i);
61: for (j=0; j<len; j++) {
62: PetscViewerASCIIPrintf(viewer,"%3D %3D %3D %3D\n",map->range[i]+j,app_loc[j],map->range[i]+j,petsc_loc[j]);
63: }
64: }
65: PetscFree2(app,petsc);
67: } else {
68: /* send values */
69: MPI_Send((void*)aomems->app_loc,map->n,MPIU_INT,0,tag_app,PetscObjectComm((PetscObject)ao));
70: MPI_Send((void*)aomems->petsc_loc,map->n,MPIU_INT,0,tag_petsc,PetscObjectComm((PetscObject)ao));
71: }
72: PetscViewerFlush(viewer);
73: return(0);
74: }
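/*
   Usage sketch (added illustration, not in the original source): this viewer is reached
   through AOView() or the -ao_view option.  Assuming an already created AO named ao,

     PetscErrorCode ierr;
     ierr = AOView(ao,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

   Only rank 0 prints; every other rank just sends its local app_loc/petsc_loc slices
   to rank 0 as done above.
*/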
78: PetscErrorCode AODestroy_MemoryScalable(AO ao)
79: {
80: AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
81: PetscErrorCode ierr;
84: PetscFree2(aomems->app_loc,aomems->petsc_loc);
85: PetscLayoutDestroy(&aomems->map);
86: PetscFree(aomems);
87: return(0);
88: }
90: /*
91: Input Parameters:
92: + ao - the application ordering context
93: . n - the number of integers in ia[]
94: . ia - the integers; these are replaced with their mapped value
95: - maploc - app_loc or petsc_loc in struct "AO_MemoryScalable"
97: Output Parameter:
98: .  ia - the mapped integers
99: */
102: PetscErrorCode AOMap_MemoryScalable_private(AO ao,PetscInt n,PetscInt *ia,PetscInt *maploc)
103: {
104: PetscErrorCode ierr;
105: AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
106: MPI_Comm comm;
107: PetscMPIInt rank,size,tag1,tag2;
108: PetscInt *owner,*start,*nprocs,nsends,nreceives;
109: PetscInt nmax,count,*sindices,*rindices,i,j,idx,lastidx,*sindices2,*rindices2;
110: PetscInt *owners = aomems->map->range;
111: MPI_Request *send_waits,*recv_waits,*send_waits2,*recv_waits2;
112: MPI_Status recv_status;
113: PetscMPIInt nindices,source,widx;
114: PetscInt *rbuf,*sbuf;
115: MPI_Status *send_status,*send_status2;
118: PetscObjectGetComm((PetscObject)ao,&comm);
119: MPI_Comm_rank(comm,&rank);
120: MPI_Comm_size(comm,&size);
122: /* first count number of contributors to each processor */
123: PetscMalloc2(2*size,PetscInt,&nprocs,size,PetscInt,&start);
124: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
125: PetscMalloc(n*sizeof(PetscInt),&owner);
126: PetscMemzero(owner,n*sizeof(PetscInt));
128: j = 0;
129: lastidx = -1;
130: for (i=0; i<n; i++) {
131: /* if indices are NOT locally sorted, need to start search at the beginning */
132: if (lastidx > (idx = ia[i])) j = 0;
133: lastidx = idx;
134: for (; j<size; j++) {
135: if (idx >= owners[j] && idx < owners[j+1]) {
136: nprocs[2*j]++; /* num of indices to be sent */
137: nprocs[2*j+1] = 1; /* send to proc[j] */
138: owner[i] = j;
139: break;
140: }
141: }
142: }
143: nprocs[2*rank]=nprocs[2*rank+1]=0; /* do not receive from self! */
144: nsends = 0;
145: for (i=0; i<size; i++) nsends += nprocs[2*i+1];
147:   /* inform other processors of number of messages and max length */
148: PetscMaxSum(comm,nprocs,&nmax,&nreceives);
150: /* allocate arrays */
151: PetscObjectGetNewTag((PetscObject)ao,&tag1);
152: PetscObjectGetNewTag((PetscObject)ao,&tag2);
154: PetscMalloc2(nreceives*nmax,PetscInt,&rindices,nreceives,MPI_Request,&recv_waits);
155: PetscMalloc2(nsends*nmax,PetscInt,&rindices2,nsends,MPI_Request,&recv_waits2);
157: PetscMalloc3(n,PetscInt,&sindices,nsends,MPI_Request,&send_waits,nsends,MPI_Status,&send_status);
158: PetscMalloc3(n,PetscInt,&sindices2,nreceives,MPI_Request,&send_waits2,nreceives,MPI_Status,&send_status2);
160:   /* post 1st receives: receive the others' requests
161:      since we don't know how long each individual message is, we
162: allocate the largest needed buffer for each receive. Potentially
163: this is a lot of wasted space.
164: */
165: for (i=0,count=0; i<nreceives; i++) {
166: MPI_Irecv(rindices+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+count++);
167: }
169: /* do 1st sends:
170:      1) start[i] gives the starting index in sindices[] for the entries going to
171:         the ith processor
172: */
173: start[0] = 0;
174: for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
175: for (i=0; i<n; i++) {
176: j = owner[i];
177: if (j != rank) {
178: sindices[start[j]++] = ia[i];
179: } else { /* compute my own map */
180: if (ia[i] >= owners[rank] && ia[i] < owners[rank+1]) {
181: ia[i] = maploc[ia[i]-owners[rank]];
182: } else {
183:         ia[i] = -1;     /* ia[i] is not in the range 0 to N-1; map it to -1 */
184: }
185: }
186: }
188: start[0] = 0;
189: for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
190: for (i=0,count=0; i<size; i++) {
191: if (nprocs[2*i+1]) {
192: /* send my request to others */
193: MPI_Isend(sindices+start[i],nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+count);
194: /* post receive for the answer of my request */
195: MPI_Irecv(sindices2+start[i],nprocs[2*i],MPIU_INT,i,tag2,comm,recv_waits2+count);
196: count++;
197: }
198: }
199: if (nsends != count) SETERRQ2(comm,PETSC_ERR_SUP,"nsends %d != count %d",nsends,count);
201: /* wait on 1st sends */
202: if (nsends) {
203: MPI_Waitall(nsends,send_waits,send_status);
204: }
206:   /* 1st recvs: the others' requests */
207: for (j=0; j< nreceives; j++) {
208:     MPI_Waitany(nreceives,recv_waits,&widx,&recv_status); /* widx: index of the receive request that completed */
209: MPI_Get_count(&recv_status,MPIU_INT,&nindices);
210: rbuf = rindices+nmax*widx; /* global index */
211: source = recv_status.MPI_SOURCE;
213: /* compute mapping */
214: sbuf = rbuf;
215: for (i=0; i<nindices; i++) sbuf[i] = maploc[rbuf[i]-owners[rank]];
217: /* send mapping back to the sender */
218: MPI_Isend(sbuf,nindices,MPIU_INT,source,tag2,comm,send_waits2+widx);
219: }
221: /* wait on 2nd sends */
222: if (nreceives) {
223: MPI_Waitall(nreceives,send_waits2,send_status2);
224: }
226: /* 2nd recvs: for the answer of my request */
227: for (j=0; j< nsends; j++) {
228: MPI_Waitany(nsends,recv_waits2,&widx,&recv_status);
229: MPI_Get_count(&recv_status,MPIU_INT,&nindices);
230: source = recv_status.MPI_SOURCE;
231: /* pack output ia[] */
232: rbuf = sindices2+start[source];
233: count = 0;
234: for (i=0; i<n; i++) {
235: if (source == owner[i]) ia[i] = rbuf[count++];
236: }
237: }
239: /* free arrays */
240: PetscFree2(nprocs,start);
241: PetscFree(owner);
242: PetscFree2(rindices,recv_waits);
243: PetscFree2(rindices2,recv_waits2);
244: PetscFree3(sindices,send_waits,send_status);
245: PetscFree3(sindices2,send_waits2,send_status2);
246: return(0);
247: }
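/*
   Usage sketch (added illustration): the private routine above does the work of
   AOPetscToApplication()/AOApplicationToPetsc(); the mapping is performed in place on
   the caller's index array.  Assuming an existing AO named ao,

     PetscInt       idx[] = {0,3,1};                         /* application indices           */
     PetscErrorCode ierr;
     ierr = AOApplicationToPetsc(ao,3,idx);CHKERRQ(ierr);    /* idx[] now holds PETSc indices */
*/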
251: PetscErrorCode AOPetscToApplication_MemoryScalable(AO ao,PetscInt n,PetscInt *ia)
252: {
253: PetscErrorCode ierr;
254: AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
255: PetscInt *app_loc = aomems->app_loc;
258: AOMap_MemoryScalable_private(ao,n,ia,app_loc);
259: return(0);
260: }
264: PetscErrorCode AOApplicationToPetsc_MemoryScalable(AO ao,PetscInt n,PetscInt *ia)
265: {
266: PetscErrorCode ierr;
267: AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
268: PetscInt *petsc_loc = aomems->petsc_loc;
271: AOMap_MemoryScalable_private(ao,n,ia,petsc_loc);
272: return(0);
273: }
275: static struct _AOOps AOOps_MemoryScalable = {
276: AOView_MemoryScalable,
277: AODestroy_MemoryScalable,
278: AOPetscToApplication_MemoryScalable,
279: AOApplicationToPetsc_MemoryScalable,
280: 0,
281: 0,
282: 0,
283: 0
284: };
288: PetscErrorCode AOCreateMemoryScalable_private(MPI_Comm comm,PetscInt napp,const PetscInt from_array[],const PetscInt to_array[],AO ao, PetscInt *aomap_loc)
289: {
290: PetscErrorCode ierr;
291: AO_MemoryScalable *aomems = (AO_MemoryScalable*)ao->data;
292: PetscLayout map = aomems->map;
293: PetscInt n_local = map->n,i,j;
294: PetscMPIInt rank,size,tag;
295: PetscInt *owner,*start,*nprocs,nsends,nreceives;
296: PetscInt nmax,count,*sindices,*rindices,idx,lastidx;
297: PetscInt *owners = aomems->map->range;
298: MPI_Request *send_waits,*recv_waits;
299: MPI_Status recv_status;
300: PetscMPIInt nindices,widx;
301: PetscInt *rbuf;
302: PetscInt n=napp,ip,ia;
303: MPI_Status *send_status;
306: PetscMemzero(aomap_loc,n_local*sizeof(PetscInt));
308: MPI_Comm_rank(comm,&rank);
309: MPI_Comm_size(comm,&size);
311: /* first count number of contributors (of from_array[]) to each processor */
312: PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
313: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
314: PetscMalloc(n*sizeof(PetscInt),&owner);
316: j = 0;
317: lastidx = -1;
318: for (i=0; i<n; i++) {
319: /* if indices are NOT locally sorted, need to start search at the beginning */
320: if (lastidx > (idx = from_array[i])) j = 0;
321: lastidx = idx;
322: for (; j<size; j++) {
323: if (idx >= owners[j] && idx < owners[j+1]) {
324: nprocs[2*j] += 2; /* num of indices to be sent - in pairs (ip,ia) */
325: nprocs[2*j+1] = 1; /* send to proc[j] */
326: owner[i] = j;
327: break;
328: }
329: }
330: }
331: nprocs[2*rank]=nprocs[2*rank+1]=0; /* do not receive from self! */
332: nsends = 0;
333: for (i=0; i<size; i++) nsends += nprocs[2*i+1];
335:   /* inform other processors of number of messages and max length */
336: PetscMaxSum(comm,nprocs,&nmax,&nreceives);
338: /* allocate arrays */
339: PetscObjectGetNewTag((PetscObject)ao,&tag);
340: PetscMalloc2(nreceives*nmax,PetscInt,&rindices,nreceives,MPI_Request,&recv_waits);
341: PetscMalloc3(2*n,PetscInt,&sindices,nsends,MPI_Request,&send_waits,nsends,MPI_Status,&send_status);
342: PetscMalloc(size*sizeof(PetscInt),&start);
344: /* post receives: */
345: for (i=0; i<nreceives; i++) {
346: MPI_Irecv(rindices+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
347: }
349: /* do sends:
350:      1) start[i] gives the starting index in sindices[] for the entries going to
351:         the ith processor
352: */
353: start[0] = 0;
354: for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
355: for (i=0; i<n; i++) {
356: j = owner[i];
357: if (j != rank) {
358: ip = from_array[i];
359: ia = to_array[i];
360: sindices[start[j]++] = ip;
361: sindices[start[j]++] = ia;
362: } else { /* compute my own map */
363: ip = from_array[i] - owners[rank];
364: ia = to_array[i];
365: aomap_loc[ip] = ia;
366: }
367: }
369: start[0] = 0;
370: for (i=1; i<size; i++) start[i] = start[i-1] + nprocs[2*i-2];
371: for (i=0,count=0; i<size; i++) {
372: if (nprocs[2*i+1]) {
373: MPI_Isend(sindices+start[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count);
374: count++;
375: }
376: }
377: if (nsends != count) SETERRQ2(comm,PETSC_ERR_SUP,"nsends %d != count %d",nsends,count);
379: /* wait on sends */
380: if (nsends) {
381: MPI_Waitall(nsends,send_waits,send_status);
382: }
384: /* recvs */
385: count=0;
386: for (j= nreceives; j>0; j--) {
387: MPI_Waitany(nreceives,recv_waits,&widx,&recv_status);
388: MPI_Get_count(&recv_status,MPIU_INT,&nindices);
389: rbuf = rindices+nmax*widx; /* global index */
391: /* compute local mapping */
392: for (i=0; i<nindices; i+=2) { /* pack aomap_loc */
393: ip = rbuf[i] - owners[rank]; /* local index */
394: ia = rbuf[i+1];
395: aomap_loc[ip] = ia;
396: }
397: count++;
398: }
400: PetscFree(start);
401: PetscFree3(sindices,send_waits,send_status);
402: PetscFree2(rindices,recv_waits);
403: PetscFree(owner);
404: PetscFree(nprocs);
405: return(0);
406: }
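/*
   Added worked example (illustrative, not part of the original source), reusing the
   2-process data from the note after the struct definition: to build petsc_loc, rank 0
   calls this routine with from_array = {3,1} (application) and to_array = {0,1} (PETSc).
   The pair (3,0) is owned by rank 1, so it is sent there and rank 1 stores
   aomap_loc[3-2] = 0; the pair (1,1) is local, so rank 0 stores aomap_loc[1-0] = 1
   directly.  Combined with the pair (0,2) received from rank 1, this gives
   petsc_loc = {2,1} on rank 0 and {3,0} on rank 1.
*/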
410: PETSC_EXTERN PetscErrorCode AOCreate_MemoryScalable(AO ao)
411: {
412: PetscErrorCode ierr;
413: IS isapp=ao->isapp,ispetsc=ao->ispetsc;
414: const PetscInt *mypetsc,*myapp;
415: PetscInt napp,n_local,N,i,start,*petsc,*lens,*disp;
416: MPI_Comm comm;
417: AO_MemoryScalable *aomems;
418: PetscLayout map;
419: PetscMPIInt size,rank;
422: /* create special struct aomems */
423: PetscNewLog(ao, AO_MemoryScalable, &aomems);
424: ao->data = (void*) aomems;
425: PetscMemcpy(ao->ops,&AOOps_MemoryScalable,sizeof(struct _AOOps));
426: PetscObjectChangeTypeName((PetscObject)ao,AOMEMORYSCALABLE);
428: /* transmit all local lengths of isapp to all processors */
429: PetscObjectGetComm((PetscObject)isapp,&comm);
430: MPI_Comm_size(comm, &size);
431: MPI_Comm_rank(comm, &rank);
432: PetscMalloc2(size,PetscInt,&lens,size,PetscInt,&disp);
433: ISGetLocalSize(isapp,&napp);
434: MPI_Allgather(&napp, 1, MPIU_INT, lens, 1, MPIU_INT, comm);
436: N = 0;
437: for (i = 0; i < size; i++) {
438: disp[i] = N;
439: N += lens[i];
440: }
442: /* If ispetsc is 0 then use "natural" numbering */
443: if (napp) {
444: if (!ispetsc) {
445: start = disp[rank];
446: PetscMalloc((napp+1) * sizeof(PetscInt), &petsc);
447: for (i=0; i<napp; i++) petsc[i] = start + i;
448: } else {
449: ISGetIndices(ispetsc,&mypetsc);
450: petsc = (PetscInt*)mypetsc;
451: }
452: }
454: /* create a map with global size N - used to determine the local sizes of ao - shall we use local napp instead of N? */
455: PetscLayoutCreate(comm,&map);
456: map->bs = 1;
457: map->N = N;
458: PetscLayoutSetUp(map);
460: ao->N = N;
461: ao->n = map->n;
462: aomems->map = map;
464: /* create distributed indices app_loc: petsc->app and petsc_loc: app->petsc */
465: n_local = map->n;
466: PetscMalloc2(n_local,PetscInt, &aomems->app_loc,n_local,PetscInt,&aomems->petsc_loc);
467: PetscLogObjectMemory(ao,2*n_local*sizeof(PetscInt));
468: PetscMemzero(aomems->app_loc,n_local*sizeof(PetscInt));
469: PetscMemzero(aomems->petsc_loc,n_local*sizeof(PetscInt));
470: ISGetIndices(isapp,&myapp);
472: AOCreateMemoryScalable_private(comm,napp,petsc,myapp,ao,aomems->app_loc);
473: AOCreateMemoryScalable_private(comm,napp,myapp,petsc,ao,aomems->petsc_loc);
475: ISRestoreIndices(isapp,&myapp);
476: if (napp) {
477: if (ispetsc) {
478: ISRestoreIndices(ispetsc,&mypetsc);
479: } else {
480: PetscFree(petsc);
481: }
482: }
483: PetscFree2(lens,disp);
484: return(0);
485: }
489: /*@C
490: AOCreateMemoryScalable - Creates a memory scalable application ordering using two integer arrays.
492: Collective on MPI_Comm
494: Input Parameters:
495: + comm - MPI communicator that is to share AO
496: . napp - size of integer arrays
497: . myapp - integer array that defines an ordering
498: - mypetsc - integer array that defines another ordering (may be NULL to
499: indicate the natural ordering, that is 0,1,2,3,...)
501: Output Parameter:
502: . aoout - the new application ordering
504: Level: beginner
506:    Notes: The arrays myapp and mypetsc must contain all the integers 0 to napp-1 with no duplicates; that is, there cannot be any "holes"
507:    in the indices. Use AOCreateMapping() or AOCreateMappingIS() if you wish to have "holes" in the indices.
508:    Compared with AOCreateBasic(), this routine trades extra message communication for lower memory usage.
510: .keywords: AO, create
512: .seealso: AOCreateMemoryScalableIS(), AODestroy(), AOPetscToApplication(), AOApplicationToPetsc()
513: @*/
514: PetscErrorCode AOCreateMemoryScalable(MPI_Comm comm,PetscInt napp,const PetscInt myapp[],const PetscInt mypetsc[],AO *aoout)
515: {
517: IS isapp,ispetsc;
518: const PetscInt *app=myapp,*petsc=mypetsc;
521: ISCreateGeneral(comm,napp,app,PETSC_USE_POINTER,&isapp);
522: if (mypetsc) {
523: ISCreateGeneral(comm,napp,petsc,PETSC_USE_POINTER,&ispetsc);
524: } else {
525: ispetsc = NULL;
526: }
527: AOCreateMemoryScalableIS(isapp,ispetsc,aoout);
528: ISDestroy(&isapp);
529: if (mypetsc) {
530: ISDestroy(&ispetsc);
531: }
532: return(0);
533: }
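/*
   Added example (a minimal sketch, not part of the original source): creating a memory
   scalable AO from an application ordering and mapping a few indices.  Intended to be
   run on exactly 2 processes; it reuses the data from the note after the struct
   definition and passes NULL for mypetsc to request the natural PETSc ordering.

     #include <petscao.h>
     int main(int argc,char **argv)
     {
       AO             ao;
       PetscInt       app[2],idx[2];
       PetscMPIInt    rank;
       PetscErrorCode ierr;

       PetscInitialize(&argc,&argv,NULL,NULL);
       ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
       app[0] = rank ? 0 : 3;  app[1] = rank ? 2 : 1;        // {3,1} on rank 0, {0,2} on rank 1
       ierr = AOCreateMemoryScalable(PETSC_COMM_WORLD,2,app,NULL,&ao);CHKERRQ(ierr);

       idx[0] = 0; idx[1] = 3;                               // application indices
       ierr = AOApplicationToPetsc(ao,2,idx);CHKERRQ(ierr);  // now PETSc indices

       ierr = AODestroy(&ao);CHKERRQ(ierr);
       PetscFinalize();
       return 0;
     }
*/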
537: /*@C
538: AOCreateMemoryScalableIS - Creates a memory scalable application ordering using two index sets.
540: Collective on IS
542: Input Parameters:
543: + isapp - index set that defines an ordering
544: - ispetsc - index set that defines another ordering (may be NULL to use the
545: natural ordering)
547: Output Parameter:
548: . aoout - the new application ordering
550: Level: beginner
552:    Notes: The index sets isapp and ispetsc must contain all the integers 0 to napp-1 (where napp is the length of the index sets) with no duplicates;
553:    that is, there cannot be any "holes".
554:    Compared with AOCreateBasicIS(), this routine trades extra message communication for lower memory usage.
555: .keywords: AO, create
557: .seealso: AOCreateMemoryScalable(), AODestroy()
558: @*/
559: PetscErrorCode AOCreateMemoryScalableIS(IS isapp,IS ispetsc,AO *aoout)
560: {
562: MPI_Comm comm;
563: AO ao;
566: PetscObjectGetComm((PetscObject)isapp,&comm);
567: AOCreate(comm,&ao);
568: AOSetIS(ao,isapp,ispetsc);
569: AOSetType(ao,AOMEMORYSCALABLE);
570: AOViewFromOptions(ao,NULL,"-ao_view");
571: *aoout = ao;
572: return(0);
573: }
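/*
   Usage sketch (added illustration, not in the original source): the IS-based variant
   takes index sets instead of raw arrays.  Assuming napp local application indices
   stored in app[],

     IS isapp;
     ierr = ISCreateGeneral(comm,napp,app,PETSC_COPY_VALUES,&isapp);CHKERRQ(ierr);
     ierr = AOCreateMemoryScalableIS(isapp,NULL,&ao);CHKERRQ(ierr);  /* NULL: natural PETSc ordering */
     ierr = ISDestroy(&isapp);CHKERRQ(ierr);
*/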