Actual source code: redistribute.c
petsc-3.14.6 2021-03-30
/*
     This file defines a preconditioner that redistributes the matrix rows for load balance,
   dropping the rows that have only a diagonal entry, and solves the reduced problem with an
   inner KSP.
*/
#include <petsc/private/pcimpl.h>
#include <petscksp.h>
typedef struct {
  KSP         ksp;          /* inner solver for the reduced, load-balanced system */
  Vec         x,b;          /* solution and right-hand side of the reduced system */
  VecScatter  scatter;      /* maps between the original vectors and the reduced ones */
  IS          is;           /* global indices of the rows kept in the reduced system */
  PetscInt    dcnt,*drows;  /* these are the local rows that have only diagonal entry */
  PetscScalar *diag;        /* inverses of the diagonal entries of the eliminated rows */
  Vec         work;         /* residual work vector, created on first PCApply */
} PC_Redistribute;
static PetscErrorCode PCView_Redistribute(PC pc,PetscViewer viewer)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;
  PetscBool       iascii,isstring;
  PetscInt        ncnt,N;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSTRING,&isstring);CHKERRQ(ierr);
  if (iascii) {
    ierr = MPIU_Allreduce(&red->dcnt,&ncnt,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)pc));CHKERRQ(ierr);
    ierr = MatGetSize(pc->pmat,&N,NULL);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer," Number rows eliminated %D Percentage rows eliminated %g\n",ncnt,100.0*((PetscReal)ncnt)/((PetscReal)N));CHKERRQ(ierr);
    ierr = PetscViewerASCIIPrintf(viewer," Redistribute preconditioner: \n");CHKERRQ(ierr);
    ierr = KSPView(red->ksp,viewer);CHKERRQ(ierr);
  } else if (isstring) {
    ierr = PetscViewerStringSPrintf(viewer," Redistribute preconditioner");CHKERRQ(ierr);
    ierr = KSPView(red->ksp,viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
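/*
   Editor's overview (not part of the original source) of PCSetUp_Redistribute below. On first
   setup it
     1) counts the local rows with more than one nonzero (kept) and the rows with only a
        diagonal entry (eliminated, recorded in drows);
     2) builds a load-balanced PetscLayout for the kept rows and, with a rendezvous message
        exchange patterned on VecScatterCreate_PtoS(), tells each process which kept rows it
        will own;
     3) creates the index set, reduced vectors, scatter, and submatrix for the inner KSP.
   On every setup it (re)extracts the inverted diagonal of the eliminated rows.
*/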
static PetscErrorCode PCSetUp_Redistribute(PC pc)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  MPI_Comm          comm;
  PetscInt          rstart,rend,i,nz,cnt,*rows,ncnt,dcnt,*drows;
  PetscLayout       map,nmap;
  PetscMPIInt       size,tag,n;
  PETSC_UNUSED PetscMPIInt imdex;
  PetscInt          *source = NULL;
  PetscMPIInt       *sizes = NULL,nrecvs;
  PetscInt          j,nsends;
  PetscInt          *owner = NULL,*starts = NULL,count,slen;
  PetscInt          *rvalues,*svalues,recvtotal;
  PetscMPIInt       *onodes1,*olengths1;
  MPI_Request       *send_waits = NULL,*recv_waits = NULL;
  MPI_Status        recv_status,*send_status;
  Vec               tvec,diag;
  Mat               tmat;
  const PetscScalar *d;

  PetscFunctionBegin;
  if (pc->setupcalled) {
    ierr = KSPGetOperators(red->ksp,NULL,&tmat);CHKERRQ(ierr);
    ierr = MatCreateSubMatrix(pc->pmat,red->is,red->is,MAT_REUSE_MATRIX,&tmat);CHKERRQ(ierr);
    ierr = KSPSetOperators(red->ksp,tmat,tmat);CHKERRQ(ierr);
  } else {
    PetscInt NN;

    ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);

    /* count non-diagonal rows on this process */
    ierr = MatGetOwnershipRange(pc->mat,&rstart,&rend);CHKERRQ(ierr);
    cnt  = 0;
    for (i=rstart; i<rend; i++) {
      ierr = MatGetRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
      if (nz > 1) cnt++;
      ierr = MatRestoreRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
    }
    ierr = PetscMalloc1(cnt,&rows);CHKERRQ(ierr);
    ierr = PetscMalloc1(rend - rstart - cnt,&drows);CHKERRQ(ierr);

    /* list the non-diagonal rows (globally) and the diagonal-only rows (locally) on this process */
    cnt = 0; dcnt = 0;
    for (i=rstart; i<rend; i++) {
      ierr = MatGetRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
      if (nz > 1) rows[cnt++] = i;
      else drows[dcnt++] = i - rstart;
      ierr = MatRestoreRow(pc->mat,i,&nz,NULL,NULL);CHKERRQ(ierr);
    }
    /* create PetscLayout for the non-diagonal rows on each process */
    ierr   = PetscLayoutCreate(comm,&map);CHKERRQ(ierr);
    ierr   = PetscLayoutSetLocalSize(map,cnt);CHKERRQ(ierr);
    ierr   = PetscLayoutSetBlockSize(map,1);CHKERRQ(ierr);
    ierr   = PetscLayoutSetUp(map);CHKERRQ(ierr);
    rstart = map->rstart;
    rend   = map->rend;

    /* create PetscLayout for the load-balanced non-diagonal rows on each process */
    ierr = PetscLayoutCreate(comm,&nmap);CHKERRQ(ierr);
    ierr = MPIU_Allreduce(&cnt,&ncnt,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    ierr = PetscLayoutSetSize(nmap,ncnt);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(nmap,1);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp(nmap);CHKERRQ(ierr);

    ierr = MatGetSize(pc->pmat,&NN,NULL);CHKERRQ(ierr);
    ierr = PetscInfo2(pc,"Number of diagonal rows eliminated %D, percentage eliminated %g\n",NN-ncnt,((PetscReal)(NN-ncnt))/((PetscReal)(NN)));CHKERRQ(ierr);

    /*
       This code is taken from VecScatterCreate_PtoS():
       it determines what rows need to be moved where in order
       to load balance the non-diagonal rows.
    */
    /* count number of contributors to each processor */
    ierr   = PetscMalloc2(size,&sizes,cnt,&owner);CHKERRQ(ierr);
    ierr   = PetscArrayzero(sizes,size);CHKERRQ(ierr);
    j      = 0;
    nsends = 0;
    for (i=rstart; i<rend; i++) {
      if (i < nmap->range[j]) j = 0;   /* row falls below process j's range: restart the ownership search */
      for (; j<size; j++) {
        if (i < nmap->range[j+1]) {
          if (!sizes[j]++) nsends++;   /* first row destined for process j: one more message to send */
          owner[i-rstart] = j;
          break;
        }
      }
    }
    /* inform other processors of the number of messages and their lengths */
    ierr = PetscGatherNumberOfMessages(comm,NULL,sizes,&nrecvs);CHKERRQ(ierr);
    ierr = PetscGatherMessageLengths(comm,nsends,nrecvs,sizes,&onodes1,&olengths1);CHKERRQ(ierr);
    ierr = PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);CHKERRQ(ierr);
    recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];

    /* post receives: rvalues - the global indices of rows I will own; count - running offset into rvalues */
    ierr  = PetscMalloc3(recvtotal,&rvalues,nrecvs,&source,nrecvs,&recv_waits);CHKERRQ(ierr);
    count = 0;
    for (i=0; i<nrecvs; i++) {
      ierr   = MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);CHKERRQ(ierr);
      count += olengths1[i];
    }
    /* do sends:
       1) starts[i] gives the starting index in svalues for the rows going to
          the ith processor
    */
    ierr      = PetscMalloc3(cnt,&svalues,nsends,&send_waits,size,&starts);CHKERRQ(ierr);
    starts[0] = 0;
    for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[i-1];
    for (i=0; i<cnt; i++) svalues[starts[owner[i]]++] = rows[i];
    for (i=0; i<cnt; i++) rows[i] = rows[i] - rstart;
    red->drows = drows;
    red->dcnt  = dcnt;
    ierr = PetscFree(rows);CHKERRQ(ierr);

    starts[0] = 0;
    for (i=1; i<size; i++) starts[i] = starts[i-1] + sizes[i-1];
    count = 0;
    for (i=0; i<size; i++) {
      if (sizes[i]) {
        ierr = MPI_Isend(svalues+starts[i],sizes[i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
      }
    }

    /* wait on receives; the rows land directly in rvalues, so only the received lengths are tallied */
    count = nrecvs;
    slen  = 0;
    while (count) {
      ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
      ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
      slen += n;
      count--;
    }
    if (slen != recvtotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Total message lengths %D not expected %D",slen,recvtotal);

    ierr = ISCreateGeneral(comm,slen,rvalues,PETSC_COPY_VALUES,&red->is);CHKERRQ(ierr);

    /* free up all work space */
    ierr = PetscFree(olengths1);CHKERRQ(ierr);
    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree3(rvalues,source,recv_waits);CHKERRQ(ierr);
    ierr = PetscFree2(sizes,owner);CHKERRQ(ierr);
    if (nsends) {   /* wait on sends */
      ierr = PetscMalloc1(nsends,&send_status);CHKERRQ(ierr);
      ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
      ierr = PetscFree(send_status);CHKERRQ(ierr);
    }
    ierr = PetscFree3(svalues,send_waits,starts);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&map);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&nmap);CHKERRQ(ierr);

    ierr = VecCreateMPI(comm,slen,PETSC_DETERMINE,&red->b);CHKERRQ(ierr);
    ierr = VecDuplicate(red->b,&red->x);CHKERRQ(ierr);
    ierr = MatCreateVecs(pc->pmat,&tvec,NULL);CHKERRQ(ierr);
    ierr = VecScatterCreate(tvec,red->is,red->b,NULL,&red->scatter);CHKERRQ(ierr);
    ierr = VecDestroy(&tvec);CHKERRQ(ierr);
    ierr = MatCreateSubMatrix(pc->pmat,red->is,red->is,MAT_INITIAL_MATRIX,&tmat);CHKERRQ(ierr);
    ierr = KSPSetOperators(red->ksp,tmat,tmat);CHKERRQ(ierr);
    ierr = MatDestroy(&tmat);CHKERRQ(ierr);
  }
  /* get (and invert) the diagonal portion of the matrix for the eliminated rows */
  ierr = PetscFree(red->diag);CHKERRQ(ierr);
  ierr = PetscMalloc1(red->dcnt,&red->diag);CHKERRQ(ierr);
  ierr = MatCreateVecs(pc->pmat,&diag,NULL);CHKERRQ(ierr);
  ierr = MatGetDiagonal(pc->pmat,diag);CHKERRQ(ierr);
  ierr = VecGetArrayRead(diag,&d);CHKERRQ(ierr);
  for (i=0; i<red->dcnt; i++) red->diag[i] = 1.0/d[red->drows[i]];
  ierr = VecRestoreArrayRead(diag,&d);CHKERRQ(ierr);
  ierr = VecDestroy(&diag);CHKERRQ(ierr);
  ierr = KSPSetUp(red->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode PCApply_Redistribute(PC pc,Vec b,Vec x)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  PetscInt          dcnt   = red->dcnt,i;
  const PetscInt    *drows = red->drows;
  PetscScalar       *xwork;
  const PetscScalar *bwork,*diag = red->diag;

  PetscFunctionBegin;
  if (!red->work) {
    ierr = VecDuplicate(b,&red->work);CHKERRQ(ierr);
  }
  /* compute the rows of the solution that have diagonal entries only */
  ierr = VecSet(x,0.0);CHKERRQ(ierr);   /* x = diag(A)^{-1} b on those rows */
  ierr = VecGetArray(x,&xwork);CHKERRQ(ierr);
  ierr = VecGetArrayRead(b,&bwork);CHKERRQ(ierr);
  for (i=0; i<dcnt; i++) xwork[drows[i]] = diag[i]*bwork[drows[i]];
  ierr = PetscLogFlops(dcnt);CHKERRQ(ierr);
  ierr = VecRestoreArray(x,&xwork);CHKERRQ(ierr);   /* restore the array obtained from x above, not red->work */
  ierr = VecRestoreArrayRead(b,&bwork);CHKERRQ(ierr);
  /* update the right-hand side for the reduced system with the diagonal rows (and corresponding columns) removed */
  ierr = MatMult(pc->pmat,x,red->work);CHKERRQ(ierr);
  ierr = VecAYPX(red->work,-1.0,b);CHKERRQ(ierr);   /* red->work = b - A x */

  ierr = VecScatterBegin(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = KSPSolve(red->ksp,red->b,red->x);CHKERRQ(ierr);
  ierr = KSPCheckSolve(red->ksp,pc,red->x);CHKERRQ(ierr);
  ierr = VecScatterBegin(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
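/*
   Editor's note on the algebra above: writing D for the diagonal-only rows and R for the
   remaining rows (so A_DR = 0 by construction), PCApply_Redistribute computes

       x_D = A_DD^{-1} b_D                (pointwise scaling in the loop above)
       x_R = A_RR^{-1} (b_R - A_RD x_D)   (inner KSP on the load-balanced submatrix)

   which is the exact solution of A x = b when the inner solve is exact.
*/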
static PetscErrorCode PCDestroy_Redistribute(PC pc)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = VecScatterDestroy(&red->scatter);CHKERRQ(ierr);
  ierr = ISDestroy(&red->is);CHKERRQ(ierr);
  ierr = VecDestroy(&red->b);CHKERRQ(ierr);
  ierr = VecDestroy(&red->x);CHKERRQ(ierr);
  ierr = KSPDestroy(&red->ksp);CHKERRQ(ierr);
  ierr = VecDestroy(&red->work);CHKERRQ(ierr);
  ierr = PetscFree(red->drows);CHKERRQ(ierr);
  ierr = PetscFree(red->diag);CHKERRQ(ierr);
  ierr = PetscFree(pc->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode PCSetFromOptions_Redistribute(PetscOptionItems *PetscOptionsObject,PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  PetscFunctionBegin;
  ierr = KSPSetFromOptions(red->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
   PCRedistributeGetKSP - Gets the KSP created by the PCREDISTRIBUTE preconditioner

   Not Collective

   Input Parameter:
.  pc - the preconditioner context

   Output Parameter:
.  innerksp - the inner KSP

   Level: advanced

@*/
PetscErrorCode PCRedistributeGetKSP(PC pc,KSP *innerksp)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  PetscValidPointer(innerksp,2);
  *innerksp = red->ksp;
  PetscFunctionReturn(0);
}
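/*
   Editor's sketch (not part of the original source): retrieving the inner KSP to configure it
   programmatically instead of through the "redistribute_" options prefix. The helper name
   ConfigureInnerSolve and the chosen type/tolerance are hypothetical; pc is assumed to already
   have type PCREDISTRIBUTE.
*/
static PetscErrorCode ConfigureInnerSolve(PC pc)
{
  KSP            inner;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCRedistributeGetKSP(pc,&inner);CHKERRQ(ierr);          /* inner KSP is owned by the PC; do not destroy it */
  ierr = KSPSetType(inner,KSPCG);CHKERRQ(ierr);                  /* e.g. use CG for a symmetric reduced system */
  ierr = KSPSetTolerances(inner,1.e-10,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);CHKERRQ(ierr); /* tighten the inner relative tolerance */
  PetscFunctionReturn(0);
}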
/* -------------------------------------------------------------------------------------*/
/*MC
     PCREDISTRIBUTE - Redistributes a matrix for load balancing, removing the rows that have only a diagonal entry, and then applies a KSP to the reduced matrix

     Options for the inner KSP and PC of the redistribute preconditioner can be set with -redistribute_ksp_xxx <values> and -redistribute_pc_xxx <values>

   Notes:
     Usually run this with -ksp_type preonly

     If you have used MatZeroRows() to eliminate (for example, Dirichlet) boundary conditions for a symmetric problem then you can use, for example, -ksp_type preonly
     -pc_type redistribute -redistribute_ksp_type cg -redistribute_pc_type bjacobi -redistribute_sub_pc_type icc to take advantage of the symmetry.

     This does NOT call a partitioner to reorder rows to lower communication; the ordering of the rows in the original matrix and the redistributed matrix is the same.

   Developer Notes:
     An option should be added to this preconditioner to use a partitioner to redistribute the rows to lower communication.

   Level: intermediate

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PCRedistributeGetKSP()
M*/
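/*
   Editor's sketch (not part of the original source): minimal application code selecting
   PCREDISTRIBUTE with the recommended preonly outer solve. The helper name
   SolveWithRedistribute is hypothetical, and A, b, x are assumed to be assembled elsewhere;
   the inner solver is then chosen on the command line, e.g.
   -redistribute_ksp_type cg -redistribute_pc_type bjacobi.
*/
static PetscErrorCode SolveWithRedistribute(Mat A,Vec b,Vec x)
{
  KSP            ksp;
  PC             pc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = KSPCreate(PetscObjectComm((PetscObject)A),&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
  ierr = KSPSetType(ksp,KSPPREONLY);CHKERRQ(ierr);   /* the outer solve is a single application of the PC */
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCREDISTRIBUTE);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);       /* picks up -redistribute_ksp_xxx and -redistribute_pc_xxx */
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}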
PETSC_EXTERN PetscErrorCode PCCreate_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red;
  const char      *prefix;

  PetscFunctionBegin;
  ierr     = PetscNewLog(pc,&red);CHKERRQ(ierr);
  pc->data = (void*)red;

  pc->ops->apply          = PCApply_Redistribute;
  pc->ops->applytranspose = NULL;
  pc->ops->setup          = PCSetUp_Redistribute;
  pc->ops->destroy        = PCDestroy_Redistribute;
  pc->ops->setfromoptions = PCSetFromOptions_Redistribute;
  pc->ops->view           = PCView_Redistribute;

  ierr = KSPCreate(PetscObjectComm((PetscObject)pc),&red->ksp);CHKERRQ(ierr);
  ierr = KSPSetErrorIfNotConverged(red->ksp,pc->erroriffailure);CHKERRQ(ierr);
  ierr = PetscObjectIncrementTabLevel((PetscObject)red->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)pc,(PetscObject)red->ksp);CHKERRQ(ierr);
  ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
  ierr = KSPSetOptionsPrefix(red->ksp,prefix);CHKERRQ(ierr);
  ierr = KSPAppendOptionsPrefix(red->ksp,"redistribute_");CHKERRQ(ierr);
  PetscFunctionReturn(0);
}