Actual source code: redistribute.c
petsc-3.4.5 2014-06-29
/*
   This file defines a preconditioner that redistributes the problem, load balanced, across subgroups of processes and solves it there.
*/
#include <petsc-private/pcimpl.h>     /*I "petscksp.h" I*/
#include <petscksp.h>

typedef struct {
  KSP         ksp;
  Vec         x,b;
  VecScatter  scatter;
  IS          is;
  PetscInt    dcnt,*drows;    /* the local rows that have only a diagonal entry */
  PetscScalar *diag;
  Vec         work;
} PC_Redistribute;
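
/*
   How the fields fit together: red->is lists the global rows of the original matrix with
   off-diagonal entries that this process owns after load balancing; red->scatter moves those
   entries between full-size vectors and the redistributed vectors red->b/red->x; drows and
   diag cache the local diagonal-only rows and their inverse diagonal so those unknowns can
   be solved for directly, without the inner KSP.
*/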
static PetscErrorCode PCView_Redistribute(PC pc,PetscViewer viewer)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;
  PetscBool       iascii,isstring;
  PetscInt        ncnt,N;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);CHKERRQ(ierr);
  if (iascii) {
    ierr = MPI_Allreduce(&red->dcnt,&ncnt,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)pc));CHKERRQ(ierr);
    ierr = MatGetSize(pc->pmat,&N,NULL);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer,"    Number rows eliminated %D Percentage rows eliminated %g\n",ncnt,100.0*((PetscReal)ncnt)/((PetscReal)N));CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer,"  Redistribute preconditioner: \n");CHKERRQ(ierr);
    ierr = KSPView(red->ksp,viewer);CHKERRQ(ierr);
  } else if (isstring) {
    ierr = PetscViewerStringSPrintf(viewer," Redistribute preconditioner");CHKERRQ(ierr);
    ierr = KSPView(red->ksp,viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
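
/*
   Setup proceeds in three steps: (1) classify each locally owned row as "diagonal only" or
   "has off-diagonal entries" by inspecting its nonzero count, (2) compute a load-balanced
   target layout for the non-diagonal rows and exchange the row indices point-to-point so
   each process learns which rows it will own, and (3) build the index set, scatter, reduced
   matrix, and inverse-diagonal cache used by PCApply_Redistribute().
*/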
static PetscErrorCode PCSetUp_Redistribute(PC pc)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  MPI_Comm          comm;
  PetscInt          rstart,rend,i,nz,cnt,*rows,ncnt,dcnt,*drows;
  PetscLayout       map,nmap;
  PetscMPIInt       size,imdex,tag,n;
  PetscInt          *source = NULL;
  PetscMPIInt       *nprocs = NULL,nrecvs;
  PetscInt          j,nsends;
  PetscInt          *owner = NULL,*starts = NULL,count,slen;
  PetscInt          *rvalues,*svalues,recvtotal;
  PetscMPIInt       *onodes1,*olengths1;
  MPI_Request       *send_waits = NULL,*recv_waits = NULL;
  MPI_Status        recv_status,*send_status;
  Vec               tvec,diag;
  Mat               tmat;
  const PetscScalar *d;

  PetscFunctionBegin;
  if (pc->setupcalled) {
    ierr = KSPGetOperators(red->ksp,NULL,&tmat,NULL);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_REUSE_MATRIX,&tmat);CHKERRQ(ierr);
    ierr = KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  } else {
    PetscInt NN;

    ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);

    /* count the non-diagonal rows on this process */
    ierr = MatGetOwnershipRange(pc->mat,&rstart,&rend);CHKERRQ(ierr);
    cnt  = 0;
    for (i=rstart; i<rend; i++) {
      ierr = MatGetRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
      if (nz > 1) cnt++;
      ierr = MatRestoreRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
    }
    ierr = PetscMalloc(cnt*sizeof(PetscInt),&rows);CHKERRQ(ierr);
    ierr = PetscMalloc((rend - rstart - cnt)*sizeof(PetscInt),&drows);CHKERRQ(ierr);

    /* list the non-diagonal rows (global indices) and diagonal-only rows (local indices) on this process */
    cnt = 0; dcnt = 0;
    for (i=rstart; i<rend; i++) {
      ierr = MatGetRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
      if (nz > 1) rows[cnt++] = i;
      else drows[dcnt++] = i - rstart;
      ierr = MatRestoreRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
    }

    /* create PetscLayout for the current distribution of non-diagonal rows on each process */
    ierr   = PetscLayoutCreate(comm,&map);CHKERRQ(ierr);
    ierr   = PetscLayoutSetLocalSize(map,cnt);CHKERRQ(ierr);
    ierr   = PetscLayoutSetBlockSize(map,1);CHKERRQ(ierr);
    ierr   = PetscLayoutSetUp(map);CHKERRQ(ierr);
    rstart = map->rstart;
    rend   = map->rend;

    /* create PetscLayout for the load-balanced distribution of non-diagonal rows on each process */
    ierr = PetscLayoutCreate(comm,&nmap);CHKERRQ(ierr);
    ierr = MPI_Allreduce(&cnt,&ncnt,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    ierr = PetscLayoutSetSize(nmap,ncnt);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(nmap,1);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp(nmap);CHKERRQ(ierr);

    ierr = MatGetSize(pc->pmat,&NN,NULL);CHKERRQ(ierr);
    ierr = PetscInfo2(pc,"Number of diagonal rows eliminated %D, percentage eliminated %g\n",NN-ncnt,((PetscReal)(NN-ncnt))/((PetscReal)(NN)));CHKERRQ(ierr);

    /*
        This code is taken from VecScatterCreate_PtoS().
        It determines what rows need to be moved where in order
        to load balance the non-diagonal rows.
    */
    /* count the number of contributors to each processor */
    ierr = PetscMalloc2(size,PetscMPIInt,&nprocs,cnt,PetscInt,&owner);CHKERRQ(ierr);
    ierr = PetscMemzero(nprocs,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

    j      = 0;
    nsends = 0;
    for (i=rstart; i<rend; i++) {
      if (i < nmap->range[j]) j = 0;
      for (; j<size; j++) {
        if (i < nmap->range[j+1]) {
          if (!nprocs[j]++) nsends++;
          owner[i-rstart] = j;
          break;
        }
      }
    }
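    /* At this point nprocs[p] is the number of redistributed rows this process sends to
       process p, nsends is the number of nonempty destinations, and owner[k] records the
       destination process of the kth locally owned non-diagonal row */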

    /* inform the other processors of the number of messages and the maximum length */
    ierr = PetscGatherNumberOfMessages(comm,NULL,nprocs,&nrecvs);CHKERRQ(ierr);
    ierr = PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);CHKERRQ(ierr);
    ierr = PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);CHKERRQ(ierr);
    recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];
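    /* nrecvs is now the number of processes that will send rows here; onodes1[] lists those
       senders and olengths1[] the corresponding message lengths, so recvtotal is the number
       of redistributed rows this process will own */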

    /* post receives:  rvalues - the rows I will own; count - running offset into rvalues */
    ierr  = PetscMalloc3(recvtotal,PetscInt,&rvalues,nrecvs,PetscInt,&source,nrecvs,MPI_Request,&recv_waits);CHKERRQ(ierr);
    count = 0;
    for (i=0; i<nrecvs; i++) {
      ierr   = MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);CHKERRQ(ierr);
      count += olengths1[i];
    }

    /* do sends:
       1) starts[i] gives the starting index in svalues for stuff going to
          the ith processor
    */
    ierr      = PetscMalloc3(cnt,PetscInt,&svalues,nsends,MPI_Request,&send_waits,size,PetscInt,&starts);CHKERRQ(ierr);
    starts[0] = 0;
    for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[i-1];
    for (i=0; i<cnt; i++) svalues[starts[owner[i]]++] = rows[i];
    for (i=0; i<cnt; i++) rows[i] = rows[i] - rstart;
    red->drows = drows;
    red->dcnt  = dcnt;
    ierr       = PetscFree(rows);CHKERRQ(ierr);

    starts[0] = 0;
    for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[i-1];
    count = 0;
    for (i=0; i<size; i++) {
      if (nprocs[i]) {
        ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
      }
    }

    /* wait on receives */
    count = nrecvs;
    slen  = 0;
    while (count) {
      ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
      /* unpack receives into our local space */
      ierr  = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
      slen += n;
      count--;
    }
    if (slen != recvtotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Total message lengths %D not expected %D",slen,recvtotal);

    ierr = ISCreateGeneral(comm,slen,rvalues,PETSC_COPY_VALUES,&red->is);CHKERRQ(ierr);

    /* free up all work space */
    ierr = PetscFree(olengths1);CHKERRQ(ierr);
    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree3(rvalues,source,recv_waits);CHKERRQ(ierr);
    ierr = PetscFree2(nprocs,owner);CHKERRQ(ierr);
    if (nsends) {   /* wait on sends */
      ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
      ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
      ierr = PetscFree(send_status);CHKERRQ(ierr);
    }
    ierr = PetscFree3(svalues,send_waits,starts);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&map);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&nmap);CHKERRQ(ierr);

    ierr = VecCreateMPI(comm,slen,PETSC_DETERMINE,&red->b);CHKERRQ(ierr);
    ierr = VecDuplicate(red->b,&red->x);CHKERRQ(ierr);
    ierr = MatGetVecs(pc->pmat,&tvec,NULL);CHKERRQ(ierr);
    ierr = VecScatterCreate(tvec,red->is,red->b,NULL,&red->scatter);CHKERRQ(ierr);
    ierr = VecDestroy(&tvec);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_INITIAL_MATRIX,&tmat);CHKERRQ(ierr);
    ierr = KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    ierr = MatDestroy(&tmat);CHKERRQ(ierr);
  }

  /* get the diagonal portion of the matrix */
  ierr = PetscMalloc(red->dcnt*sizeof(PetscScalar),&red->diag);CHKERRQ(ierr);
  ierr = MatGetVecs(pc->pmat,&diag,NULL);CHKERRQ(ierr);
  ierr = MatGetDiagonal(pc->pmat,diag);CHKERRQ(ierr);
  ierr = VecGetArrayRead(diag,&d);CHKERRQ(ierr);
  for (i=0; i<red->dcnt; i++) red->diag[i] = 1.0/d[red->drows[i]];
  ierr = VecRestoreArrayRead(diag,&d);CHKERRQ(ierr);
  ierr = VecDestroy(&diag);CHKERRQ(ierr);
  ierr = KSPSetUp(red->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
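
/*
   The application phase solves the (permuted) system

        [ D      0   ] [x_D]   [b_D]
        [ A_RD   A_R ] [x_R] = [b_R]

   where D holds the diagonal-only rows. It first sets x_D = D^{-1} b_D directly, then forms
   the reduced right hand side b_R - A_RD x_D (computed below as red->work = b - A x, whose
   entries in the D rows are zero) and runs the inner KSP on A_R x_R = b_R - A_RD x_D.
*/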
static PetscErrorCode PCApply_Redistribute(PC pc,Vec b,Vec x)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  PetscInt          dcnt = red->dcnt,i;
  const PetscInt    *drows = red->drows;
  PetscScalar       *xwork;
  const PetscScalar *bwork,*diag = red->diag;

  PetscFunctionBegin;
  if (!red->work) {
    ierr = VecDuplicate(b,&red->work);CHKERRQ(ierr);
  }
  /* compute the rows of the solution that have diagonal entries only */
  ierr = VecSet(x,0.0);CHKERRQ(ierr);          /* x = diag(A)^{-1} b */
  ierr = VecGetArray(x,&xwork);CHKERRQ(ierr);
  ierr = VecGetArrayRead(b,&bwork);CHKERRQ(ierr);
  for (i=0; i<dcnt; i++) xwork[drows[i]] = diag[i]*bwork[drows[i]];
  ierr = PetscLogFlops(dcnt);CHKERRQ(ierr);
  ierr = VecRestoreArray(x,&xwork);CHKERRQ(ierr);   /* restore the array obtained from x above */
  ierr = VecRestoreArrayRead(b,&bwork);CHKERRQ(ierr);
  /* update the right hand side for the reduced system with the diagonal rows (and corresponding columns) removed */
  ierr = MatMult(pc->pmat,x,red->work);CHKERRQ(ierr);
  ierr = VecAYPX(red->work,-1.0,b);CHKERRQ(ierr);   /* red->work = b - A x */

  ierr = VecScatterBegin(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = KSPSolve(red->ksp,red->b,red->x);CHKERRQ(ierr);
  ierr = VecScatterBegin(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode PCDestroy_Redistribute(PC pc)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = VecScatterDestroy(&red->scatter);CHKERRQ(ierr);
  ierr = ISDestroy(&red->is);CHKERRQ(ierr);
  ierr = VecDestroy(&red->b);CHKERRQ(ierr);
  ierr = VecDestroy(&red->x);CHKERRQ(ierr);
  ierr = KSPDestroy(&red->ksp);CHKERRQ(ierr);
  ierr = VecDestroy(&red->work);CHKERRQ(ierr);
  ierr = PetscFree(red->drows);CHKERRQ(ierr);
  ierr = PetscFree(red->diag);CHKERRQ(ierr);
  ierr = PetscFree(pc->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode PCSetFromOptions_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  PetscFunctionBegin;
  ierr = KSPSetFromOptions(red->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
   PCRedistributeGetKSP - Gets the KSP created by the PCREDISTRIBUTE preconditioner

   Not Collective

   Input Parameter:
.  pc - the preconditioner context

   Output Parameter:
.  innerksp - the inner KSP

   Level: advanced

.keywords: PC, redistribute solve
@*/
PetscErrorCode  PCRedistributeGetKSP(PC pc,KSP *innerksp)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  PetscValidPointer(innerksp,2);
  *innerksp = red->ksp;
  PetscFunctionReturn(0);
}
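
/*
   Usage sketch: how a caller might retrieve and customize the inner KSP; the tolerance
   values here are arbitrary illustrations, not defaults.

     KSP inner;
     ierr = PCRedistributeGetKSP(pc,&inner);CHKERRQ(ierr);
     ierr = KSPSetTolerances(inner,1.e-10,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);CHKERRQ(ierr);
*/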
/* -------------------------------------------------------------------------------------*/
/*MC
     PCREDISTRIBUTE - Redistributes a matrix for load balancing, removing the rows that have only a diagonal entry, and then applies a KSP to the reduced matrix

     Options for the redistribute preconditioner can be set with -redistribute_ksp_xxx <values> and -redistribute_pc_xxx <values>

     Notes: Usually run this with -ksp_type preonly

     If you have used MatZeroRows() to eliminate (for example, Dirichlet) boundary conditions for a symmetric problem then you can use, for example, -ksp_type preonly
     -pc_type redistribute -redistribute_ksp_type cg -redistribute_pc_type bjacobi -redistribute_sub_pc_type icc to take advantage of the symmetry.

     This does NOT call a partitioner to reorder rows to lower communication; the ordering of the rows in the original matrix and the redistributed matrix is the same.

     Developer Notes: Should add an option to this preconditioner to use a partitioner to redistribute the rows to lower communication.

     Level: intermediate

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PCRedistributeGetKSP()
M*/
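
/*
   Programmatic usage sketch, roughly equivalent to the command-line options described above;
   the matrix A and vectors b,x are assumed to already exist.

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPSetType(ksp,KSPPREONLY);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCREDISTRIBUTE);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);   // picks up -redistribute_ksp_type cg etc.
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
*/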
PETSC_EXTERN PetscErrorCode PCCreate_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red;
  const char      *prefix;

  PetscFunctionBegin;
  ierr     = PetscNewLog(pc,PC_Redistribute,&red);CHKERRQ(ierr);
  pc->data = (void*)red;

  pc->ops->apply          = PCApply_Redistribute;
  pc->ops->applytranspose = 0;
  pc->ops->setup          = PCSetUp_Redistribute;
  pc->ops->destroy        = PCDestroy_Redistribute;
  pc->ops->setfromoptions = PCSetFromOptions_Redistribute;
  pc->ops->view           = PCView_Redistribute;

  ierr = KSPCreate(PetscObjectComm((PetscObject)pc),&red->ksp);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)red->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(pc,red->ksp);CHKERRQ(ierr);
  ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
  ierr = KSPSetOptionsPrefix(red->ksp,prefix);CHKERRQ(ierr);
  ierr = KSPAppendOptionsPrefix(red->ksp,"redistribute_");CHKERRQ(ierr);
  PetscFunctionReturn(0);
}