Actual source code: mmdense.c
/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense *)mat->data;

  PetscFunctionBegin;
  if (!mdn->Mvctx) {
    /* Create local vector that is used to scatter into */
    PetscCall(VecDestroy(&mdn->lvec));
    if (mdn->A) { PetscCall(MatCreateVecs(mdn->A, &mdn->lvec, NULL)); }
    PetscCall(PetscLayoutSetUp(mat->cmap));
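    /* The star forest below implements an allgather over the column layout: each process
       receives the entire distributed input vector into the sequential work vector lvec
       used by the local multiply */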
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)mat), &mdn->Mvctx));
    PetscCall(PetscSFSetGraphWithPattern(mdn->Mvctx, mat->cmap, PETSCSF_PATTERN_ALLGATHER));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
{
  PetscInt nmax, nstages_local, nstages, i, pos, max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) PetscCall(PetscCalloc1(ismax + 1, submat));
  /* Determine the number of stages through which submatrices are done */
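  /* nmax bounds how many index sets are handled per stage so that each stage's
     integer row-index buffers stay around 20 MB (roughly nmax * N * sizeof(PetscInt)) */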
  nmax = 20 * 1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

  for (i = 0, pos = 0; i < nstages; i++) {
    if (pos + nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else max_no = ismax - pos;
    PetscCall(MatCreateSubMatrices_MPIDense_Local(C, max_no, isrow + pos, iscol + pos, scall, *submat + pos));
    pos += max_no;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats)
{
  Mat_MPIDense *c = (Mat_MPIDense *)C->data;
  Mat A = c->A;
  Mat_SeqDense *a = (Mat_SeqDense *)A->data, *mat;
  PetscMPIInt rank, size, tag0, tag1, idex, end, i;
  PetscInt N = C->cmap->N, rstart = C->rmap->rstart, count;
  const PetscInt **irow, **icol, *irow_i;
  PetscInt *nrow, *ncol, *w1, *w3, *w4, *rtable, start;
  PetscInt **sbuf1, m, j, k, l, ct1, **rbuf1, row, proc;
  PetscInt nrqs, msz, **ptr, *ctr, *pa, *tmp, bsz, nrqr;
  PetscInt is_no, jmax, **rmap, *rmap_i;
  PetscInt ctr_j, *sbuf1_j, *rbuf1_i;
  MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2;
  MPI_Status *r_status1, *r_status2, *s_status1, *s_status2;
  MPI_Comm comm;
  PetscScalar **rbuf2, **sbuf2;
  PetscBool sorted;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  tag0 = ((PetscObject)C)->tag;
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  m = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));

  /* Check that the row and col indices are sorted */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISSorted(isrow[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "ISrow is not sorted");
    PetscCall(ISSorted(iscol[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "IScol is not sorted");
  }

  PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, m, &rtable));
  for (i = 0; i < ismax; i++) {
    PetscCall(ISGetIndices(isrow[i], &irow[i]));
    PetscCall(ISGetIndices(iscol[i], &icol[i]));
    PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
    PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));
  }

  /* Create hash table for the mapping: row -> proc */
  for (i = 0, j = 0; i < size; i++) {
    jmax = C->rmap->range[i + 1];
    for (; j < jmax; j++) rtable[j] = i;
  }

  /* Evaluate the communication: to whom messages are sent, the length of each message, and the buffer space
     required. Based on this, buffers are allocated and data is copied into them */
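  /* Work arrays (per destination process j):
       w1[2*j]     - number of row indices this rank will request from process j
       w1[2*j + 1] - flag, set to 1 if a message is sent to process j
       w3[j]       - number of index sets that need rows from process j
       w4[j]       - per-IS scratch count of rows owned by process j */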
  PetscCall(PetscMalloc3(2 * size, &w1, size, &w3, size, &w4));
  PetscCall(PetscArrayzero(w1, size * 2)); /* initialize work vector */
  PetscCall(PetscArrayzero(w3, size));     /* initialize work vector */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(w4, size)); /* initialize work vector */
    jmax = nrow[i];
    irow_i = irow[i];
    for (j = 0; j < jmax; j++) {
      row = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j = 0; j < size; j++) {
      if (w4[j]) {
        w1[2 * j] += w4[j];
        w3[j]++;
      }
    }
  }

  nrqs = 0;         /* number of outgoing messages */
  msz = 0;          /* total message length (over all procs) */
  w1[2 * rank] = 0; /* no message is sent to self */
  w3[rank] = 0;
  for (i = 0; i < size; i++) {
    if (w1[2 * i]) {
      w1[2 * i + 1] = 1;
      nrqs++;
    } /* there exists a message to proc i */
  }
  PetscCall(PetscMalloc1(nrqs + 1, &pa)); /* pa[] lists the processes this rank sends requests to */
  for (i = 0, j = 0; i < size; i++) {
    if (w1[2 * i]) {
      pa[j] = i;
      j++;
    }
  }

  /* Each message has a header = 1 + 2*(number of IS), followed by the data */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    w1[2 * j] += w1[2 * j + 1] + 2 * w3[j];
    msz += w1[2 * j];
  }
  /* Do a global reduction to determine how many messages to expect */
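  /* bsz becomes the maximum length of any request message this rank will receive,
     and nrqr the number of request messages that will arrive here */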
  PetscCall(PetscMaxSum(comm, w1, &bsz, &nrqr));

  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscCall(PetscMalloc1(nrqr + 1, &rbuf1));
  PetscCall(PetscMalloc1(nrqr * bsz, &rbuf1[0]));
  for (i = 1; i < nrqr; ++i) rbuf1[i] = rbuf1[i - 1] + bsz;

  /* Post the receives */
  PetscCall(PetscMalloc1(nrqr + 1, &r_waits1));
  for (i = 0; i < nrqr; ++i) PetscCallMPI(MPI_Irecv(rbuf1[i], bsz, MPIU_INT, MPI_ANY_SOURCE, tag0, comm, r_waits1 + i));

  /* Allocate memory for outgoing messages */
  PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
  PetscCall(PetscArrayzero(sbuf1, size));
  PetscCall(PetscArrayzero(ptr, size));
  {
    PetscInt *iptr = tmp, ict = 0;
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      iptr += ict;
      sbuf1[j] = iptr;
      ict = w1[2 * j];
    }
  }

  /* Form the outgoing messages */
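  /* Layout of each request message sbuf1[j]:
       [0]               - number of index sets that need rows from process j
       [2*k-1], [2*k]    - for the k-th such IS: the IS number and the number of rows requested
       remaining entries - the requested global row indices, grouped by IS */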
  /* Initialize the header space */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    sbuf1[j][0] = 0;
    PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
    ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(ctr, size));
    irow_i = irow[i];
    jmax = nrow[i];
    for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
      row = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j = 0; j < size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j = sbuf1[j];
        k = ++sbuf1_j[0];
        sbuf1_j[2 * k] = ctr_j;
        sbuf1_j[2 * k - 1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqs + 1, &s_waits1));
  for (i = 0; i < nrqs; ++i) {
    j = pa[i];
    PetscCallMPI(MPI_Isend(sbuf1[j], w1[2 * j], MPIU_INT, j, tag0, comm, s_waits1 + i));
  }

  /* Post receives to capture the row_data from other procs */
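  /* For each request sent to process j, the reply contains N scalars per requested row;
     the number of requested rows is the message length minus the header (2*sbuf1[j][0] + 1) */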
  PetscCall(PetscMalloc1(nrqs + 1, &r_waits2));
  PetscCall(PetscMalloc1(nrqs + 1, &rbuf2));
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    count = (w1[2 * j] - (2 * sbuf1[j][0] + 1)) * N;
    PetscCall(PetscMalloc1(count + 1, &rbuf2[i]));
    PetscCallMPI(MPI_Irecv(rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2 + i));
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  PetscCall(PetscMalloc1(nrqr + 1, &s_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &r_status1));
  PetscCall(PetscMalloc1(nrqr + 1, &sbuf2));

  {
    PetscScalar *sbuf2_i;
    const PetscScalar *v_start, *v;
    PetscInt s_proc;

    PetscCall(MatDenseGetArrayRead(A, &v));
    for (i = 0; i < nrqr; ++i) {
      PetscCallMPI(MPI_Waitany(nrqr, r_waits1, &idex, r_status1 + i));
      s_proc = r_status1[i].MPI_SOURCE; /* the processor that sent this request */
      rbuf1_i = rbuf1[idex];            /* actual message from s_proc */
      /* number of requested rows = end - start, where start skips the header
         (2*rbuf1_i[0] + 1 entries) and end is the length of the received buffer */
      start = 2 * rbuf1_i[0] + 1;
      PetscCallMPI(MPI_Get_count(r_status1 + i, MPIU_INT, &end));
      /* allocate memory sufficient to hold all the row values */
      PetscCall(PetscMalloc1((end - start) * N, &sbuf2[idex]));
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
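      /* The local dense array v is stored column-major with leading dimension a->lda,
         so consecutive entries of one row are a->lda apart in memory */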
      for (j = start; j < end; j++) {
        row = rbuf1_i[j] - rstart;
        v_start = v + row;
        for (k = 0; k < N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      PetscCallMPI(MPI_Isend(sbuf2[idex], (end - start) * N, MPIU_SCALAR, s_proc, tag1, comm, s_waits2 + i));
    }
    PetscCall(MatDenseRestoreArrayRead(A, &v));
  }
  /* End Send-Recv of IS + row_numbers */
  PetscCall(PetscFree(r_status1));
  PetscCall(PetscFree(r_waits1));
  PetscCall(PetscMalloc1(nrqs + 1, &s_status1));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status1));
  PetscCall(PetscFree(s_status1));
  PetscCall(PetscFree(s_waits1));

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)submats[i]->data;
      PetscCheck(!(submats[i]->rmap->n != nrow[i]) && !(submats[i]->cmap->n != ncol[i]), PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix. wrong size");
      PetscCall(PetscArrayzero(mat->v, submats[i]->rmap->n * submats[i]->cmap->n));

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i = 0; i < ismax; i++) {
      PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
      PetscCall(MatSetSizes(submats[i], nrow[i], ncol[i], nrow[i], ncol[i]));
      PetscCall(MatSetType(submats[i], ((PetscObject)A)->type_name));
      PetscCall(MatSeqDenseSetPreallocation(submats[i], NULL));
    }
  }

  /* Assemble the matrices */
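  /* First copy the locally owned rows straight out of the local dense array a->v;
     rows owned by other processes are filled in further below from the received buffers */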
  {
    PetscInt col;
    PetscScalar *imat_v, *mat_v, *imat_vi, *mat_vi;

    for (i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)submats[i]->data;
      mat_v = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m = nrow[i];
      for (j = 0; j < m; j++) {
        row = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row = row - rstart;
          mat_vi = mat_v + row;
          imat_vi = imat_v + j;
          for (k = 0; k < ncol[i]; k++) {
            col = icol[i][k];
            imat_vi[k * m] = mat_vi[col * a->lda];
          }
        }
      }
    }
  }

  /* Create row map. This maps c->row to submat->row for each submat */
  /* this is a very expensive operation wrt memory usage */
  PetscCall(PetscMalloc1(ismax, &rmap));
  PetscCall(PetscCalloc1(ismax * C->rmap->N, &rmap[0]));
  for (i = 1; i < ismax; i++) rmap[i] = rmap[i - 1] + C->rmap->N;
  for (i = 0; i < ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax = nrow[i];
    for (j = 0; j < jmax; j++) rmap_i[irow_i[j]] = j;
  }

  /* Now receive the row_values and assemble the rest of the matrix */
  PetscCall(PetscMalloc1(nrqs + 1, &r_status2));
  {
    PetscInt is_max, tmp1, col, *sbuf1_i, is_sz;
    PetscScalar *rbuf2_i, *imat_v, *imat_vi;

    for (tmp1 = 0; tmp1 < nrqs; tmp1++) { /* For each message */
      PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &i, r_status2 + tmp1));
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max = sbuf1_i[0];
      ct1 = 2 * is_max + 1;
      rbuf2_i = rbuf2[i];
      for (j = 1; j <= is_max; j++) { /* For each IS belonging to the message */
        is_no = sbuf1_i[2 * j - 1];
        is_sz = sbuf1_i[2 * j];
        mat = (Mat_SeqDense *)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m = nrow[is_no];
        for (k = 0; k < is_sz; k++, rbuf2_i += N) { /* For each row */
          row = sbuf1_i[ct1];
          ct1++;
          row = rmap_i[row];
          imat_vi = imat_v + row;
          for (l = 0; l < ncol[is_no]; l++) { /* For each col */
            col = icol[is_no][l];
            imat_vi[l * m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscCall(PetscFree(r_status2));
  PetscCall(PetscFree(r_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &s_status2));
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status2));
  PetscCall(PetscFree(s_status2));
  PetscCall(PetscFree(s_waits2));

  /* Restore the indices */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISRestoreIndices(isrow[i], irow + i));
    PetscCall(ISRestoreIndices(iscol[i], icol + i));
  }

  PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, rtable));
  PetscCall(PetscFree3(w1, w3, w4));
  PetscCall(PetscFree(pa));

  for (i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf2[i]));
  PetscCall(PetscFree(rbuf2));
  PetscCall(PetscFree4(sbuf1, ptr, tmp, ctr));
  PetscCall(PetscFree(rbuf1[0]));
  PetscCall(PetscFree(rbuf1));

  for (i = 0; i < nrqr; ++i) PetscCall(PetscFree(sbuf2[i]));

  PetscCall(PetscFree(sbuf2));
  PetscCall(PetscFree(rmap[0]));
  PetscCall(PetscFree(rmap));
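
  /* Finish the assembly of each sequential submatrix */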
  for (i = 0; i < ismax; i++) {
    PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA, PetscScalar alpha)
{
  Mat_MPIDense *A = (Mat_MPIDense *)inA->data;

  PetscFunctionBegin;
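  /* All entries of the parallel dense matrix live in the local block A->A (this process's rows),
     so scaling the local block scales the entire parallel matrix */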
  PetscCall(MatScale(A->A, alpha));
  PetscFunctionReturn(PETSC_SUCCESS);
}