Actual source code: ibcgs.c

petsc-3.8.4 2018-03-24

#include <petsc/private/kspimpl.h>
#include <petsc/private/vecimpl.h>

static PetscErrorCode KSPSetUp_IBCGS(KSP ksp)
{
  PetscBool      diagonalscale;

  PCGetDiagonalScale(ksp->pc,&diagonalscale);
  if (diagonalscale) SETERRQ1(PetscObjectComm((PetscObject)ksp),PETSC_ERR_SUP,"Krylov method %s does not support diagonal scaling",((PetscObject)ksp)->type_name);
  KSPSetWorkVecs(ksp,9);
  return(0);
}

/*
    The code below "cheats" from PETSc style
       1) VecRestoreArray() is called immediately after VecGetArray() and the array values are still accessed; the reason for the immediate
          restore is that Vec operations are done on some of the vectors during the solve and if we did not restore immediately it would
          generate two VecGetArray() calls (the second one inside the Vec operation) without a restore between them.
       2) The vector operations are done directly on the arrays instead of with VecXXXX() calls

       For clarity in the code we name single VECTORS with two names, for example, Rn_1 and Rn, but they are actually always
     the exact same memory. We do this with macro defines so that the compiler won't think they are
     two different variables.

*/
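
/* An illustrative sketch of the pattern described in 1) above (not part of the solver itself):

      VecGetArray(R,&r);          obtain the raw array pointer
      VecRestoreArray(R,NULL);    restore immediately ...
      ... r[i] may still be read below, and Vec operations such as VecNorm(R,...)
      remain legal because no array access is left registered as outstanding      */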
#define Xn_1 Xn
#define xn_1 xn
#define Rn_1 Rn
#define rn_1 rn
#define Un_1 Un
#define un_1 un
#define Vn_1 Vn
#define vn_1 vn
#define Qn_1 Qn
#define qn_1 qn
#define Zn_1 Zn
#define zn_1 zn
static PetscErrorCode  KSPSolve_IBCGS(KSP ksp)
{
  PetscInt       i,N;
  PetscReal      rnorm,rnormin = 0.0;
#if defined(PETSC_HAVE_MPI_LONG_DOUBLE) && !defined(PETSC_USE_COMPLEX) && (defined(PETSC_USE_REAL_SINGLE) || defined(PETSC_USE_REAL_DOUBLE))
  /* Because of possible instabilities in the algorithm (as indicated by different residual histories for the same problem
     on the same number of processes with different runs) we support computing the inner products using Intel's 80 bit arithmetic
     rather than just 64 bit. Thus we copy our double precision values into long doubles (hoping this keeps the 16 extra bits)
     and tell MPI to do its Allreduces with MPI_LONG_DOUBLE.

     Note for developers: this does not affect the code. Intel's long double is implemented by storing the 80 bits of extended double
     precision into a 16 byte space (the rest of the space is ignored)  */
  long double insums[7],outsums[7];
#else
  PetscScalar insums[7],outsums[7];
#endif
  PetscScalar                       sigman_2, sigman_1, sigman, pin_1, pin, phin_1, phin,tmp1,tmp2;
  PetscScalar                       taun_1, taun, rhon, alphan_1, alphan, omegan_1, omegan;
  const PetscScalar *PETSC_RESTRICT r0, *PETSC_RESTRICT f0, *PETSC_RESTRICT qn, *PETSC_RESTRICT b, *PETSC_RESTRICT un;
  PetscScalar *PETSC_RESTRICT       rn, *PETSC_RESTRICT xn, *PETSC_RESTRICT vn, *PETSC_RESTRICT zn;
  /* the rest do not have to keep n_1 values */
  PetscScalar                       kappan, thetan, etan, gamman, betan, deltan;
  const PetscScalar *PETSC_RESTRICT tn;
  PetscScalar *PETSC_RESTRICT       sn;
  Vec                               R0,Rn,Xn,F0,Vn,Zn,Qn,Tn,Sn,B,Un;
  Mat                               A;

  if (!ksp->vec_rhs->petscnative) SETERRQ(PetscObjectComm((PetscObject)ksp),PETSC_ERR_SUP,"Only coded for PETSc vectors");

#if defined(PETSC_HAVE_MPI_LONG_DOUBLE) && !defined(PETSC_USE_COMPLEX) && (defined(PETSC_USE_REAL_SINGLE) || defined(PETSC_USE_REAL_DOUBLE))
  /* since 80 bit long doubles do not fill the upper bits, we fill them initially so that
     valgrind won't detect MPI_Allreduce() with uninitialized data */
  PetscMemzero(insums,sizeof(insums));
  PetscMemzero(outsums,sizeof(outsums));
#endif

  PCGetOperators(ksp->pc,&A,NULL);
  VecGetLocalSize(ksp->vec_sol,&N);
  Xn   = ksp->vec_sol; VecGetArray(Xn_1,(PetscScalar**)&xn_1); VecRestoreArray(Xn_1,NULL);
  B    = ksp->vec_rhs; VecGetArrayRead(B,(const PetscScalar**)&b); VecRestoreArrayRead(B,NULL);
  R0   = ksp->work[0]; VecGetArrayRead(R0,(const PetscScalar**)&r0); VecRestoreArrayRead(R0,NULL);
  Rn   = ksp->work[1]; VecGetArray(Rn_1,(PetscScalar**)&rn_1); VecRestoreArray(Rn_1,NULL);
  Un   = ksp->work[2]; VecGetArrayRead(Un_1,(const PetscScalar**)&un_1); VecRestoreArrayRead(Un_1,NULL);
  F0   = ksp->work[3]; VecGetArrayRead(F0,(const PetscScalar**)&f0); VecRestoreArrayRead(F0,NULL);
  Vn   = ksp->work[4]; VecGetArray(Vn_1,(PetscScalar**)&vn_1); VecRestoreArray(Vn_1,NULL);
  Zn   = ksp->work[5]; VecGetArray(Zn_1,(PetscScalar**)&zn_1); VecRestoreArray(Zn_1,NULL);
  Qn   = ksp->work[6]; VecGetArrayRead(Qn_1,(const PetscScalar**)&qn_1); VecRestoreArrayRead(Qn_1,NULL);
  Tn   = ksp->work[7]; VecGetArrayRead(Tn,(const PetscScalar**)&tn); VecRestoreArrayRead(Tn,NULL);
  Sn   = ksp->work[8]; VecGetArrayRead(Sn,(const PetscScalar**)&sn); VecRestoreArrayRead(Sn,NULL);

  /* r0 = rn_1 = b - A*xn_1; */
  /* KSP_PCApplyBAorAB(ksp,Xn_1,Rn_1,Tn);
     VecAYPX(Rn_1,-1.0,B); */
  KSPInitialResidual(ksp,Xn_1,Tn,Sn,Rn_1,B);

  VecNorm(Rn_1,NORM_2,&rnorm);
  KSPMonitor(ksp,0,rnorm);
  (*ksp->converged)(ksp,0,rnorm,&ksp->reason,ksp->cnvP);
  if (ksp->reason) return(0);

  VecCopy(Rn_1,R0);

  /* un_1 = A*rn_1; */
  KSP_PCApplyBAorAB(ksp,Rn_1,Un_1,Tn);

  /* f0   = A'*rn_1; */
  if (ksp->pc_side == PC_RIGHT) { /* B' A' */
    KSP_MatMultTranspose(ksp,A,R0,Tn);
    KSP_PCApplyTranspose(ksp,Tn,F0);
  } else if (ksp->pc_side == PC_LEFT) { /* A' B' */
    KSP_PCApplyTranspose(ksp,R0,Tn);
    KSP_MatMultTranspose(ksp,A,Tn,F0);
  }

  /* qn_1 = vn_1 = zn_1 = 0.0; */
  VecSet(Qn_1,0.0);
  VecSet(Vn_1,0.0);
  VecSet(Zn_1,0.0);

  sigman_2 = pin_1 = taun_1 = 0.0;

  /* the paper says phin_1 should be initialized to zero; it is actually R0'R0 */
  VecDot(R0,R0,&phin_1);

  /* sigman_1 = rn_1'un_1  */
  VecDot(R0,Un_1,&sigman_1);

  alphan_1 = omegan_1 = 1.0;

  for (ksp->its = 1; ksp->its<ksp->max_it+1; ksp->its++) {
    rhon = phin_1 - omegan_1*sigman_2 + omegan_1*alphan_1*pin_1;
    if (ksp->its == 1) deltan = rhon;
    else deltan = rhon/taun_1;
    betan = deltan/omegan_1;
    taun  = sigman_1 + betan*taun_1  - deltan*pin_1;
    if (taun == 0.0) {
      if (ksp->errorifnotconverged) SETERRQ1(PetscObjectComm((PetscObject)ksp),PETSC_ERR_NOT_CONVERGED,"KSPSolve has not converged because taun is zero, iteration %D",ksp->its);
      else {
        ksp->reason = KSP_DIVERGED_NANORINF;
        return(0);
      }
    }
    alphan = rhon/taun;
    PetscLogFlops(15.0);

    /*
        zn = alphan*rn_1 + (alphan/alphan_1)*betan*zn_1 - alphan*deltan*vn_1
        vn = un_1 + betan*vn_1 - deltan*qn_1
        sn = rn_1 - alphan*vn

       The algorithm in the paper is missing the alphan/alphan_1 term in the zn update
    */
    PetscLogEventBegin(VEC_Ops,0,0,0,0);
    tmp1 = (alphan/alphan_1)*betan;
    tmp2 = alphan*deltan;
    for (i=0; i<N; i++) {
      zn[i] = alphan*rn_1[i] + tmp1*zn_1[i] - tmp2*vn_1[i];
      vn[i] = un_1[i] + betan*vn_1[i] - deltan*qn_1[i];
      sn[i] = rn_1[i] - alphan*vn[i];
    }
    PetscLogFlops(3.0+11.0*N);
    PetscLogEventEnd(VEC_Ops,0,0,0,0);

    /*
        qn = A*vn
    */
    KSP_PCApplyBAorAB(ksp,Vn,Qn,Tn);

    /*
        tn = un_1 - alphan*qn
    */
    VecWAXPY(Tn,-alphan,Qn,Un_1);


    /*
        phin = r0'sn
        pin  = r0'qn
        gamman = f0'sn
        etan   = f0'tn
        thetan = sn'tn
        kappan = tn'tn
    */
    PetscLogEventBegin(VEC_ReduceArithmetic,0,0,0,0);
    phin = pin = gamman = etan = thetan = kappan = 0.0;
    for (i=0; i<N; i++) {
      phin   += r0[i]*sn[i];
      pin    += r0[i]*qn[i];
      gamman += f0[i]*sn[i];
      etan   += f0[i]*tn[i];
      thetan += sn[i]*tn[i];
      kappan += tn[i]*tn[i];
    }
    PetscLogFlops(12.0*N);
    PetscLogEventEnd(VEC_ReduceArithmetic,0,0,0,0);

    insums[0] = phin;
    insums[1] = pin;
    insums[2] = gamman;
    insums[3] = etan;
    insums[4] = thetan;
    insums[5] = kappan;
    insums[6] = rnormin;

    PetscLogEventBarrierBegin(VEC_ReduceBarrier,0,0,0,0,PetscObjectComm((PetscObject)ksp));
#if defined(PETSC_HAVE_MPI_LONG_DOUBLE) && !defined(PETSC_USE_COMPLEX) && (defined(PETSC_USE_REAL_SINGLE) || defined(PETSC_USE_REAL_DOUBLE))
    if (ksp->lagnorm && ksp->its > 1) {
      MPIU_Allreduce(insums,outsums,7,MPI_LONG_DOUBLE,MPI_SUM,PetscObjectComm((PetscObject)ksp));
    } else {
      MPIU_Allreduce(insums,outsums,6,MPI_LONG_DOUBLE,MPI_SUM,PetscObjectComm((PetscObject)ksp));
    }
#else
    if (ksp->lagnorm && ksp->its > 1) {
      MPIU_Allreduce(insums,outsums,7,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)ksp));
    } else {
      MPIU_Allreduce(insums,outsums,6,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)ksp));
    }
#endif
    PetscLogEventBarrierEnd(VEC_ReduceBarrier,0,0,0,0,PetscObjectComm((PetscObject)ksp));
    phin   = outsums[0];
    pin    = outsums[1];
    gamman = outsums[2];
    etan   = outsums[3];
    thetan = outsums[4];
    kappan = outsums[5];
    if (ksp->lagnorm && ksp->its > 1) rnorm = PetscSqrtReal(PetscRealPart(outsums[6]));

    if (kappan == 0.0) {
      if (ksp->errorifnotconverged) SETERRQ1(PetscObjectComm((PetscObject)ksp),PETSC_ERR_NOT_CONVERGED,"KSPSolve has not converged because kappan is zero, iteration %D",ksp->its);
      else {
        ksp->reason = KSP_DIVERGED_NANORINF;
        return(0);
      }
    }
    if (thetan == 0.0) {
      if (ksp->errorifnotconverged) SETERRQ1(PetscObjectComm((PetscObject)ksp),PETSC_ERR_NOT_CONVERGED,"KSPSolve has not converged because thetan is zero, iteration %D",ksp->its);
      else {
        ksp->reason = KSP_DIVERGED_NANORINF;
        return(0);
      }
    }
    omegan = thetan/kappan;
    sigman = gamman - omegan*etan;

    /*
        rn = sn - omegan*tn
        xn = xn_1 + zn + omegan*sn
    */
    PetscLogEventBegin(VEC_Ops,0,0,0,0);
    rnormin = 0.0;
    for (i=0; i<N; i++) {
      rn[i]    = sn[i] - omegan*tn[i];
      rnormin += PetscRealPart(PetscConj(rn[i])*rn[i]);
      xn[i]   += zn[i] + omegan*sn[i];
    }
    PetscObjectStateIncrease((PetscObject)Xn);
    PetscLogFlops(7.0*N);
    PetscLogEventEnd(VEC_Ops,0,0,0,0);

    if (!ksp->lagnorm && ksp->chknorm < ksp->its) {
      PetscLogEventBarrierBegin(VEC_ReduceBarrier,0,0,0,0,PetscObjectComm((PetscObject)ksp));
      MPIU_Allreduce(&rnormin,&rnorm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)ksp));
      PetscLogEventBarrierEnd(VEC_ReduceBarrier,0,0,0,0,PetscObjectComm((PetscObject)ksp));
      rnorm = PetscSqrtReal(rnorm);
    }

    /* Test for convergence */
    KSPMonitor(ksp,ksp->its,rnorm);
    (*ksp->converged)(ksp,ksp->its,rnorm,&ksp->reason,ksp->cnvP);
    if (ksp->reason) break;

    /* un = A*rn */
    KSP_PCApplyBAorAB(ksp,Rn,Un,Tn);

    /* Update n-1 locations with n locations */
    sigman_2 = sigman_1;
    sigman_1 = sigman;
    pin_1    = pin;
    phin_1   = phin;
    alphan_1 = alphan;
    taun_1   = taun;
    omegan_1 = omegan;
  }
  if (ksp->its >= ksp->max_it) ksp->reason = KSP_DIVERGED_ITS;
  KSPUnwindPreconditioner(ksp,Xn,Tn);
  return(0);
}


/*MC
     KSPIBCGS - Implements the IBiCGStab (Improved Stabilized version of BiConjugate Gradient) method
            in an alternative form that needs only a single global reduction operation per iteration instead of the usual 3 (or 4)

   Options Database Keys:
.   see KSPSolve()

   Level: beginner

   Notes: Supports left and right preconditioning

          See KSPBCGSL for additional stabilization

          Unlike the Bi-CG-stab algorithm, this requires one multiplication by the transpose of the operator
           before the iteration starts.

          The paper has two errors in the algorithm presented; they are fixed in the code in KSPSolve_IBCGS()

          For maximum reduction in the number of global reduction operations, this solver should be used with
          KSPSetLagNorm().

          This is not supported for complex numbers.

   Reference: The Improved BiCGStab Method for Large and Sparse Unsymmetric Linear Systems on Parallel Distributed Memory
              Architectures. L. T. Yang and R. Brent, Proceedings of the Fifth International Conference on Algorithms and
              Architectures for Parallel Processing, 2002, IEEE.
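
   A minimal usage sketch (an illustration, not from the original manual page; assumes a KSP ksp with operators already set, and Vec b, x):
.vb
     KSPSetType(ksp,KSPIBCGS);
     KSPSetLagNorm(ksp,PETSC_TRUE); /* lag the residual norm so its reduction is merged with the inner products */
     KSPSolve(ksp,b,x);
.ve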

.seealso:  KSPCreate(), KSPSetType(), KSPType (for list of available types), KSP, KSPBICG, KSPBCGSL, KSPIBCGS, KSPSetLagNorm()
M*/

PETSC_EXTERN PetscErrorCode KSPCreate_IBCGS(KSP ksp)
{

  ksp->data = (void*)0;

  KSPSetSupportedNorm(ksp,KSP_NORM_PRECONDITIONED,PC_LEFT,3);
  KSPSetSupportedNorm(ksp,KSP_NORM_UNPRECONDITIONED,PC_RIGHT,2);

  ksp->ops->setup          = KSPSetUp_IBCGS;
  ksp->ops->solve          = KSPSolve_IBCGS;
  ksp->ops->destroy        = KSPDestroyDefault;
  ksp->ops->buildsolution  = KSPBuildSolutionDefault;
  ksp->ops->buildresidual  = KSPBuildResidualDefault;
  ksp->ops->setfromoptions = 0;
  ksp->ops->view           = 0;
#if defined(PETSC_USE_COMPLEX)
  SETERRQ(PetscObjectComm((PetscObject)ksp),PETSC_ERR_SUP,"This is not supported for complex numbers");
#endif
  return(0);
}