Actual source code: dgefa4.c

petsc-3.3-p7 2013-05-11
/*
       Inverts a 4 by 4 matrix using partial pivoting.

       Used by the sparse factorization routines in
       src/mat/impls/baij/seq

       This is a combination of the Linpack routines
       dgefa() and dgedi() specialized for a size of 4.
*/
#include <petscsys.h>

PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a,PetscReal shift)
{
    PetscInt   i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
    PetscInt   k4,j3;
    MatScalar  *aa,*ax,*ay,work[16],stmp;
    MatReal    tmp,max;

/*     gaussian elimination with partial pivoting */
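/*     (three elimination steps follow; the pivot row chosen at each step is
       recorded in ipvt[].  The optional shift is first scaled by the magnitude
       of the diagonal entries so that it is relative to the block.) */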

    shift = .25*shift*(1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));
    /* Parameter adjustments */
    a       -= 5;
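    /* After the pointer adjustment, entry (i,j) of the block (1-based) is addressed
       as a[i + 4*j], the Fortran/Linpack column-major convention; for inversion the
       storage order is immaterial since (A^T)^-1 = (A^-1)^T. */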

    for (k = 1; k <= 3; ++k) {
        kp1 = k + 1;
        k3  = 4*k;
        k4  = k3 + k;
/*        find l = pivot index */
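/*        (scan column k at and below the diagonal for the entry of largest
          magnitude; its row index becomes the pivot l, saved in ipvt[k-1]) */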

        i__2 = 5 - k;
        aa = &a[k4];
        max = PetscAbsScalar(aa[0]);
        l = 1;
        for (ll=1; ll<i__2; ll++) {
          tmp = PetscAbsScalar(aa[ll]);
          if (tmp > max) { max = tmp; l = ll+1;}
        }
        l       += k - 1;
        ipvt[k-1] = l;

        if (a[l + k3] == 0.0) {
          if (shift == 0.0) {
            SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",k-1);
          } else {
            /* the shift is applied to a SINGLE diagonal entry; does this make any sense? */
            a[l + k3] = shift;
          }
        }

/*           interchange if necessary */

 58:         if (l != k) {
 59:           stmp      = a[l + k3];
 60:           a[l + k3] = a[k4];
 61:           a[k4]     = stmp;
 62:         }

 64: /*           compute multipliers */

 66:         stmp = -1. / a[k4];
 67:         i__2 = 4 - k;
 68:         aa = &a[1 + k4];
 69:         for (ll=0; ll<i__2; ll++) {
 70:           aa[ll] *= stmp;
 71:         }

/*           row elimination with column indexing */
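/*           (each remaining column j > k is updated: the pivot entry is swapped
             in if rows were interchanged, then stmp times the multiplier column
             is added, so the trailing block becomes the Schur complement) */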

        ax = &a[k4+1];
        for (j = kp1; j <= 4; ++j) {
            j3   = 4*j;
            stmp = a[l + j3];
            if (l != k) {
              a[l + j3] = a[k + j3];
              a[k + j3] = stmp;
            }

            i__3 = 4 - k;
            ay = &a[1+k+j3];
            for (ll=0; ll<i__3; ll++) {
              ay[ll] += stmp*ax[ll];
            }
        }
    }
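    /* the last column needs no elimination; record the trivial pivot and check
       that the remaining (4,4) entry, a[20] in the shifted indexing, is nonzero */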
    ipvt[3] = 4;
    if (a[20] == 0.0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",3);

    /*
         Now form the inverse
    */

    /*     compute inverse(u) */
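    /* (invert the upper triangular factor U in place, one column at a time:
       reciprocate the diagonal entry, scale the column above it, and update
       the columns to its right) */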

    for (k = 1; k <= 4; ++k) {
        k3    = 4*k;
        k4    = k3 + k;
        a[k4] = 1.0 / a[k4];
        stmp  = -a[k4];
        i__2  = k - 1;
        aa    = &a[k3 + 1];
        for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
        kp1 = k + 1;
        if (4 < kp1) continue;
        ax = aa;
        for (j = kp1; j <= 4; ++j) {
            j3        = 4*j;
            stmp      = a[k + j3];
            a[k + j3] = 0.0;
            ay        = &a[j3 + 1];
            for (ll=0; ll<k; ll++) {
              ay[ll] += stmp*ax[ll];
            }
        }
    }

    /*    form inverse(u)*inverse(l) */
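    /* (work backwards over the columns: the stored multiplier column is saved in
       work[] and zeroed, applied to inv(U), and the row interchange recorded in
       ipvt[] is undone by swapping the corresponding columns; this leaves
       inv(A) in the block) */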

    for (kb = 1; kb <= 3; ++kb) {
        k   = 4 - kb;
        k3  = 4*k;
        kp1 = k + 1;
        aa  = a + k3;
        for (i = kp1; i <= 4; ++i) {
            work[i-1] = aa[i];
            aa[i]     = 0.0;
        }
        for (j = kp1; j <= 4; ++j) {
            stmp  = work[j-1];
            ax    = &a[4*j + 1];
            ay    = &a[k3 + 1];
            ay[0] += stmp*ax[0];
            ay[1] += stmp*ax[1];
            ay[2] += stmp*ax[2];
            ay[3] += stmp*ax[3];
        }
        l = ipvt[k-1];
        if (l != k) {
            ax = &a[k3 + 1];
            ay = &a[4*l + 1];
            stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
            stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
            stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
            stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
        }
    }
    return(0);
}
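/*
   Illustrative usage sketch (not part of the original source): invert a single
   4x4 block stored contiguously, the way the BAIJ factorization kernels use the
   routine above.  The wrapper name and the test values are made up for the
   example; only the kernel call itself comes from this file.
*/
PetscErrorCode PetscKernelInverse4Example(void)
{
  PetscErrorCode ierr;
  MatScalar      block[16] = {4.0,0.0,0.0,0.0,
                              0.0,3.0,0.0,0.0,
                              0.0,0.0,2.0,0.0,
                              0.0,0.0,0.0,1.0};  /* diagonal test block, easy to check by hand */

  /* overwrite block[] with its inverse; a zero shift means a zero pivot is reported as an error */
  ierr = PetscKernel_A_gets_inverse_A_4(block,0.0);CHKERRQ(ierr);
  /* block[] now holds diag(0.25, 1/3, 0.5, 1.0) */
  return(0);
}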

#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
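  /*
     (Outline of the code below, for readers unfamiliar with the Intel reference:
     the matrix is loaded and effectively transposed into registers, the rows of
     the adjugate are accumulated from products of 2x2 sub-determinants, the
     determinant is formed as a dot product and its reciprocal is refined by one
     Newton-Raphson step, and the scaled adjugate is stored back over a.)
  */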

  SSE_SCOPE_BEGIN;
    SSE_INLINE_BEGIN_1(a)

      /* ----------------------------------------------- */
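      /* load the 4x4 matrix and gather its columns into XMM3..XMM6 (i.e. form the
         transpose; two of the register rows are held rotated by two elements, as
         in the Intel reference code) */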

      SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
      SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

      SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
      SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

      SSE_COPY_PS(XMM3,XMM0)
      SSE_SHUFFLE(XMM3,XMM5,0x88)

      SSE_SHUFFLE(XMM5,XMM0,0xDD)

      SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
      SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

      SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
      SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

      SSE_COPY_PS(XMM4,XMM0)
      SSE_SHUFFLE(XMM4,XMM6,0x88)

      SSE_SHUFFLE(XMM6,XMM0,0xDD)

      /* ----------------------------------------------- */
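      /* accumulate the rows of the adjugate (cofactor) matrix from products of
         2x2 sub-determinants; partially computed rows are parked in the storage
         of a (FLOAT_0, FLOAT_4, FLOAT_12) to free registers and reloaded later */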

      SSE_COPY_PS(XMM7,XMM4)
      SSE_MULT_PS(XMM7,XMM6)

      SSE_SHUFFLE(XMM7,XMM7,0xB1)

      SSE_COPY_PS(XMM0,XMM5)
      SSE_MULT_PS(XMM0,XMM7)

      SSE_COPY_PS(XMM2,XMM3)
      SSE_MULT_PS(XMM2,XMM7)

      SSE_SHUFFLE(XMM7,XMM7,0x4E)

      SSE_COPY_PS(XMM1,XMM5)
      SSE_MULT_PS(XMM1,XMM7)
      SSE_SUB_PS(XMM1,XMM0)

      SSE_MULT_PS(XMM7,XMM3)
      SSE_SUB_PS(XMM7,XMM2)

      SSE_SHUFFLE(XMM7,XMM7,0x4E)
      SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

      /* ----------------------------------------------- */

      SSE_COPY_PS(XMM0,XMM5)
      SSE_MULT_PS(XMM0,XMM4)

      SSE_SHUFFLE(XMM0,XMM0,0xB1)

      SSE_COPY_PS(XMM2,XMM6)
      SSE_MULT_PS(XMM2,XMM0)
      SSE_ADD_PS(XMM2,XMM1)

      SSE_COPY_PS(XMM7,XMM3)
      SSE_MULT_PS(XMM7,XMM0)

      SSE_SHUFFLE(XMM0,XMM0,0x4E)

      SSE_COPY_PS(XMM1,XMM6)
      SSE_MULT_PS(XMM1,XMM0)
      SSE_SUB_PS(XMM2,XMM1)

      SSE_MULT_PS(XMM0,XMM3)
      SSE_SUB_PS(XMM0,XMM7)

      SSE_SHUFFLE(XMM0,XMM0,0x4E)
      SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

      /* ----------------------------------------------- */

      SSE_COPY_PS(XMM7,XMM5)
      SSE_SHUFFLE(XMM7,XMM5,0x4E)
      SSE_MULT_PS(XMM7,XMM6)

      SSE_SHUFFLE(XMM7,XMM7,0xB1)

      SSE_SHUFFLE(XMM4,XMM4,0x4E)

      SSE_COPY_PS(XMM0,XMM4)
      SSE_MULT_PS(XMM0,XMM7)
      SSE_ADD_PS(XMM0,XMM2)

      SSE_COPY_PS(XMM2,XMM3)
      SSE_MULT_PS(XMM2,XMM7)

      SSE_SHUFFLE(XMM7,XMM7,0x4E)

      SSE_COPY_PS(XMM1,XMM4)
      SSE_MULT_PS(XMM1,XMM7)
      SSE_SUB_PS(XMM0,XMM1)
      SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

      SSE_MULT_PS(XMM7,XMM3)
      SSE_SUB_PS(XMM7,XMM2)

      SSE_SHUFFLE(XMM7,XMM7,0x4E)

      /* ----------------------------------------------- */

      SSE_COPY_PS(XMM1,XMM3)
      SSE_MULT_PS(XMM1,XMM5)

      SSE_SHUFFLE(XMM1,XMM1,0xB1)

      SSE_COPY_PS(XMM0,XMM6)
      SSE_MULT_PS(XMM0,XMM1)
      SSE_ADD_PS(XMM0,XMM7)

      SSE_COPY_PS(XMM2,XMM4)
      SSE_MULT_PS(XMM2,XMM1)
      SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

      SSE_SHUFFLE(XMM1,XMM1,0x4E)

      SSE_COPY_PS(XMM7,XMM6)
      SSE_MULT_PS(XMM7,XMM1)
      SSE_SUB_PS(XMM7,XMM0)

      SSE_MULT_PS(XMM1,XMM4)
      SSE_SUB_PS(XMM2,XMM1)
      SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

      /* ----------------------------------------------- */

      SSE_COPY_PS(XMM1,XMM3)
      SSE_MULT_PS(XMM1,XMM6)

      SSE_SHUFFLE(XMM1,XMM1,0xB1)

      SSE_COPY_PS(XMM2,XMM4)
      SSE_MULT_PS(XMM2,XMM1)
      SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
      SSE_SUB_PS(XMM0,XMM2)

      SSE_COPY_PS(XMM2,XMM5)
      SSE_MULT_PS(XMM2,XMM1)
      SSE_ADD_PS(XMM2,XMM7)

      SSE_SHUFFLE(XMM1,XMM1,0x4E)

      SSE_COPY_PS(XMM7,XMM4)
      SSE_MULT_PS(XMM7,XMM1)
      SSE_ADD_PS(XMM7,XMM0)

      SSE_MULT_PS(XMM1,XMM5)
      SSE_SUB_PS(XMM2,XMM1)

      /* ----------------------------------------------- */

      SSE_MULT_PS(XMM4,XMM3)

      SSE_SHUFFLE(XMM4,XMM4,0xB1)

      SSE_COPY_PS(XMM1,XMM6)
      SSE_MULT_PS(XMM1,XMM4)
      SSE_ADD_PS(XMM1,XMM7)

      SSE_COPY_PS(XMM0,XMM5)
      SSE_MULT_PS(XMM0,XMM4)
      SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
      SSE_SUB_PS(XMM7,XMM0)

      SSE_SHUFFLE(XMM4,XMM4,0x4E)

      SSE_MULT_PS(XMM6,XMM4)
      SSE_SUB_PS(XMM1,XMM6)

      SSE_MULT_PS(XMM5,XMM4)
      SSE_ADD_PS(XMM5,XMM7)

      /* ----------------------------------------------- */
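      /* determinant: dot product of the first column of a (XMM3) with the first
         row of the adjugate (reloaded from FLOAT_0), horizontally summed; its
         reciprocal is an approximate SSE reciprocal refined by one
         Newton-Raphson step */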

      SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
      SSE_MULT_PS(XMM3,XMM0)

      SSE_COPY_PS(XMM4,XMM3)
      SSE_SHUFFLE(XMM4,XMM3,0x4E)
      SSE_ADD_PS(XMM4,XMM3)

      SSE_COPY_PS(XMM6,XMM4)
      SSE_SHUFFLE(XMM6,XMM4,0xB1)
      SSE_ADD_SS(XMM6,XMM4)

      SSE_COPY_PS(XMM3,XMM6)
      SSE_RECIP_SS(XMM3,XMM6)
      SSE_COPY_SS(XMM4,XMM3)
      SSE_ADD_SS(XMM4,XMM3)
      SSE_MULT_SS(XMM3,XMM3)
      SSE_MULT_SS(XMM6,XMM3)
      SSE_SUB_SS(XMM4,XMM6)

      SSE_SHUFFLE(XMM4,XMM4,0x00)
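      /* XMM4 now holds 1/det in all four lanes; scale each adjugate row and
         store the inverse back over a */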

      SSE_MULT_PS(XMM0,XMM4)
      SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
      SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

      SSE_MULT_PS(XMM1,XMM4)
      SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
      SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

      SSE_MULT_PS(XMM2,XMM4)
      SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
      SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

      SSE_MULT_PS(XMM4,XMM5)
      SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
      SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

      /* ----------------------------------------------- */

    SSE_INLINE_END_1;
  SSE_SCOPE_END;

  return(0);
}

#endif