/***********************************comm.c*************************************

Author: Henry M. Tufo III

e-mail: hmt@cs.brown.edu

snail-mail:
Division of Applied Mathematics
Brown University
Providence, RI 02912

Last Modification:
11.21.97
***********************************comm.c*************************************/
#include <../src/ksp/pc/impls/tfs/tfs.h>

/* global program control variables - explicitly exported */
PetscMPIInt PCTFS_my_id            = 0;
PetscMPIInt PCTFS_num_nodes        = 1;
PetscMPIInt PCTFS_floor_num_nodes  = 0;
PetscMPIInt PCTFS_i_log2_num_nodes = 0;

/* global program control variables */
static PetscInt p_init = 0;
static PetscInt modfl_num_nodes;
static PetscInt edge_not_pow_2;

static PetscInt edge_node[sizeof(PetscInt)*32];
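
/* Derived in PCTFS_comm_init() below:
     PCTFS_floor_num_nodes  - largest power of two <= PCTFS_num_nodes
     PCTFS_i_log2_num_nodes - log2(PCTFS_floor_num_nodes)
     modfl_num_nodes        - number of ranks above that power-of-two boundary
     edge_not_pow_2         - partner rank used to fold the extra ranks into
                              the floor hypercube (0 if this rank has no fold partner)
     edge_node[i]           - this rank's neighbor across hypercube dimension i
                              (PCTFS_my_id with bit i flipped) */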

/***********************************comm.c*************************************/
PetscErrorCode PCTFS_comm_init(void)
{
  if (p_init++) return(0);

  MPI_Comm_size(MPI_COMM_WORLD,&PCTFS_num_nodes);
  MPI_Comm_rank(MPI_COMM_WORLD,&PCTFS_my_id);

  if (PCTFS_num_nodes > (INT_MAX >> 1)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Can't have more than INT_MAX/2 nodes!!!");

  PCTFS_ivec_zero((PetscInt*)edge_node,sizeof(PetscInt)*32);

  PCTFS_floor_num_nodes  = 1;
  PCTFS_i_log2_num_nodes = modfl_num_nodes = 0;
  while (PCTFS_floor_num_nodes <= PCTFS_num_nodes) {
    edge_node[PCTFS_i_log2_num_nodes] = PCTFS_my_id ^ PCTFS_floor_num_nodes;
    PCTFS_floor_num_nodes <<= 1;
    PCTFS_i_log2_num_nodes++;
  }

  PCTFS_i_log2_num_nodes--;
  PCTFS_floor_num_nodes >>= 1;
  modfl_num_nodes         = (PCTFS_num_nodes - PCTFS_floor_num_nodes);

  if ((PCTFS_my_id > 0) && (PCTFS_my_id <= modfl_num_nodes)) edge_not_pow_2 = ((PCTFS_my_id|PCTFS_floor_num_nodes)-1);
  else if (PCTFS_my_id >= PCTFS_floor_num_nodes)             edge_not_pow_2 = ((PCTFS_my_id^PCTFS_floor_num_nodes)+1);
  else                                                       edge_not_pow_2 = 0;
  return(0);
}

/***********************************comm.c*************************************/
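/* PCTFS_giop: global integer operation over all ranks via hypercube
   fan-in/fan-out.  On return every rank holds the combined result in
   vals[0..n-1]; work is scratch space of length n.  oprs[0] selects the
   combining function through PCTFS_ivec_fct_addr(); if it is NON_UNIFORM,
   the per-entry operation codes follow in oprs[1..n].

   A minimal usage sketch (hypothetical caller; GL_ADD is assumed here to be
   one of the operation codes declared in tfs.h):

     PetscInt val = local_count, scratch, op[] = {GL_ADD};
     PCTFS_giop(&val, &scratch, 1, op);   // val now holds the global sum
*/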
PetscErrorCode PCTFS_giop(PetscInt *vals, PetscInt *work, PetscInt n, PetscInt *oprs)
{
  PetscInt   mask, edge;
  PetscInt   type, dest;
  vfp        fp;
  MPI_Status status;

  /* ok ... should have some data, work, and operator(s) */
  if (!vals||!work||!oprs) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop() :: vals=%D, work=%D, oprs=%D",vals,work,oprs);

  /* non-uniform should have at least two entries */
  if ((oprs[0] == NON_UNIFORM)&&(n<2)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop() :: non_uniform and n=0,1?");

  /* check to make sure comm package has been initialized */
  if (!p_init) PCTFS_comm_init();

  /* if there's nothing to do return */
  if ((PCTFS_num_nodes<2)||(!n)) return(0);

  /* a negative number of items to send ==> fatal */
  if (n<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop() :: n=%D<0?",n);

  /* advance to list of n operations for custom */
  if ((type=oprs[0])==NON_UNIFORM) oprs++;

  /* major league hack */
  if (!(fp = (vfp) PCTFS_ivec_fct_addr(type))) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop() :: Could not retrieve function pointer!\n");

  /* all msgs will be of the same length */
  /* if not a hypercube must collapse partial dim */
  if (edge_not_pow_2) {
    if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
      MPI_Send(vals,n,MPIU_INT,edge_not_pow_2,MSGTAG0+PCTFS_my_id,MPI_COMM_WORLD);
    } else {
      MPI_Recv(work,n,MPIU_INT,MPI_ANY_SOURCE,MSGTAG0+edge_not_pow_2,MPI_COMM_WORLD,&status);
      (*fp)(vals,work,n,oprs);
    }
  }
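
  /* Fan-in: along each hypercube dimension the higher-numbered rank sends and
     the lower one combines, so the full result accumulates at rank 0; the
     fan-out below then walks the dimensions in reverse to broadcast it back. */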
  /* implement the mesh fan in/out exchange algorithm */
  if (PCTFS_my_id<PCTFS_floor_num_nodes) {
    for (mask=1,edge=0; edge<PCTFS_i_log2_num_nodes; edge++,mask<<=1) {
      dest = PCTFS_my_id^mask;
      if (PCTFS_my_id > dest) {
        MPI_Send(vals,n,MPIU_INT,dest,MSGTAG2+PCTFS_my_id,MPI_COMM_WORLD);
      } else {
        MPI_Recv(work,n,MPIU_INT,MPI_ANY_SOURCE,MSGTAG2+dest,MPI_COMM_WORLD,&status);
        (*fp)(vals, work, n, oprs);
      }
    }

    mask = PCTFS_floor_num_nodes>>1;
    for (edge=0; edge<PCTFS_i_log2_num_nodes; edge++,mask>>=1) {
      if (PCTFS_my_id%mask) continue;

      dest = PCTFS_my_id^mask;
      if (PCTFS_my_id < dest) {
        MPI_Send(vals,n,MPIU_INT,dest,MSGTAG4+PCTFS_my_id,MPI_COMM_WORLD);
      } else {
        MPI_Recv(vals,n,MPIU_INT,MPI_ANY_SOURCE,MSGTAG4+dest,MPI_COMM_WORLD,&status);
      }
    }
  }

  /* if not a hypercube must expand to partial dim */
  if (edge_not_pow_2) {
    if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
      MPI_Recv(vals,n,MPIU_INT,MPI_ANY_SOURCE,MSGTAG5+edge_not_pow_2,MPI_COMM_WORLD,&status);
    } else {
      MPI_Send(vals,n,MPIU_INT,edge_not_pow_2,MSGTAG5+PCTFS_my_id,MPI_COMM_WORLD);
    }
  }
  return(0);
}

/***********************************comm.c*************************************/
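/* PCTFS_grop: PetscScalar counterpart of PCTFS_giop(); identical hypercube
   fan-in/fan-out, but the messages are MPIU_SCALAR and the combining function
   comes from PCTFS_rvec_fct_addr(). */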
PetscErrorCode PCTFS_grop(PetscScalar *vals, PetscScalar *work, PetscInt n, PetscInt *oprs)
{
  PetscInt   mask, edge;
  PetscInt   type, dest;
  vfp        fp;
  MPI_Status status;

  /* ok ... should have some data, work, and operator(s) */
  if (!vals||!work||!oprs) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop() :: vals=%D, work=%D, oprs=%D",vals,work,oprs);

  /* non-uniform should have at least two entries */
  if ((oprs[0] == NON_UNIFORM)&&(n<2)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop() :: non_uniform and n=0,1?");

  /* check to make sure comm package has been initialized */
  if (!p_init) PCTFS_comm_init();

  /* if there's nothing to do return */
  if ((PCTFS_num_nodes<2)||(!n)) return(0);

  /* a negative number of items to send ==> fatal */
  if (n<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop() :: n=%D<0?",n);

  /* advance to list of n operations for custom */
  if ((type=oprs[0])==NON_UNIFORM) oprs++;

  if (!(fp = (vfp) PCTFS_rvec_fct_addr(type))) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop() :: Could not retrieve function pointer!\n");

  /* all msgs will be of the same length */
  /* if not a hypercube must collapse partial dim */
  if (edge_not_pow_2) {
    if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
      MPI_Send(vals,n,MPIU_SCALAR,edge_not_pow_2,MSGTAG0+PCTFS_my_id,MPI_COMM_WORLD);
    } else {
      MPI_Recv(work,n,MPIU_SCALAR,MPI_ANY_SOURCE,MSGTAG0+edge_not_pow_2,MPI_COMM_WORLD,&status);
      (*fp)(vals,work,n,oprs);
    }
  }

  /* implement the mesh fan in/out exchange algorithm */
  if (PCTFS_my_id<PCTFS_floor_num_nodes) {
    for (mask=1,edge=0; edge<PCTFS_i_log2_num_nodes; edge++,mask<<=1) {
      dest = PCTFS_my_id^mask;
      if (PCTFS_my_id > dest) {
        MPI_Send(vals,n,MPIU_SCALAR,dest,MSGTAG2+PCTFS_my_id,MPI_COMM_WORLD);
      } else {
        MPI_Recv(work,n,MPIU_SCALAR,MPI_ANY_SOURCE,MSGTAG2+dest,MPI_COMM_WORLD,&status);
        (*fp)(vals, work, n, oprs);
      }
    }

    mask = PCTFS_floor_num_nodes>>1;
    for (edge=0; edge<PCTFS_i_log2_num_nodes; edge++,mask>>=1) {
      if (PCTFS_my_id%mask) continue;

      dest = PCTFS_my_id^mask;
      if (PCTFS_my_id < dest) {
        MPI_Send(vals,n,MPIU_SCALAR,dest,MSGTAG4+PCTFS_my_id,MPI_COMM_WORLD);
      } else {
        MPI_Recv(vals,n,MPIU_SCALAR,MPI_ANY_SOURCE,MSGTAG4+dest,MPI_COMM_WORLD,&status);
      }
    }
  }

  /* if not a hypercube must expand to partial dim */
  if (edge_not_pow_2) {
    if (PCTFS_my_id >= PCTFS_floor_num_nodes) {
      MPI_Recv(vals,n,MPIU_SCALAR,MPI_ANY_SOURCE,MSGTAG5+edge_not_pow_2,MPI_COMM_WORLD,&status);
    } else {
      MPI_Send(vals,n,MPIU_SCALAR,edge_not_pow_2,MSGTAG5+PCTFS_my_id,MPI_COMM_WORLD);
    }
  }
  return(0);
}

/***********************************comm.c*************************************/
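/* PCTFS_grop_hc: like PCTFS_grop(), but the exchange is restricted to the
   lowest dim hypercube dimensions, so results are combined only within each
   subcube of 2^dim ranks.  Requires PCTFS_num_nodes to be a power of two. */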
PetscErrorCode PCTFS_grop_hc(PetscScalar *vals, PetscScalar *work, PetscInt n, PetscInt *oprs, PetscInt dim)
{
  PetscInt   mask, edge;
  PetscInt   type, dest;
  vfp        fp;
  MPI_Status status;

  /* ok ... should have some data, work, and operator(s) */
  if (!vals||!work||!oprs) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop_hc() :: vals=%D, work=%D, oprs=%D",vals,work,oprs);

  /* non-uniform should have at least two entries */
  if ((oprs[0] == NON_UNIFORM)&&(n<2)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop_hc() :: non_uniform and n=0,1?");

  /* check to make sure comm package has been initialized */
  if (!p_init) PCTFS_comm_init();

  /* if there's nothing to do return */
  if ((PCTFS_num_nodes<2)||(!n)||(dim<=0)) return(0);

  /* the error msg says it all!!! */
  if (modfl_num_nodes) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop_hc() :: PCTFS_num_nodes not a power of 2!?!");

  /* a negative number of items to send ==> fatal */
  if (n<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop_hc() :: n=%D<0?",n);

  /* can't do more dimensions than exist */
  dim = PetscMin(dim,PCTFS_i_log2_num_nodes);

  /* advance to list of n operations for custom */
  if ((type=oprs[0])==NON_UNIFORM) oprs++;

  if (!(fp = (vfp) PCTFS_rvec_fct_addr(type))) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_grop_hc() :: Could not retrieve function pointer!\n");

  for (mask=1,edge=0; edge<dim; edge++,mask<<=1) {
    dest = PCTFS_my_id^mask;
    if (PCTFS_my_id > dest) {
      MPI_Send(vals,n,MPIU_SCALAR,dest,MSGTAG2+PCTFS_my_id,MPI_COMM_WORLD);
    } else {
      MPI_Recv(work,n,MPIU_SCALAR,MPI_ANY_SOURCE,MSGTAG2+dest,MPI_COMM_WORLD,&status);
      (*fp)(vals, work, n, oprs);
    }
  }

  if (edge==dim) mask>>=1;
  else {
    while (++edge<dim) mask<<=1;
  }

  for (edge=0; edge<dim; edge++,mask>>=1) {
    if (PCTFS_my_id%mask) continue;

    dest = PCTFS_my_id^mask;
    if (PCTFS_my_id < dest) {
      MPI_Send(vals,n,MPIU_SCALAR,dest,MSGTAG4+PCTFS_my_id,MPI_COMM_WORLD);
    } else {
      MPI_Recv(vals,n,MPIU_SCALAR,MPI_ANY_SOURCE,MSGTAG4+dest,MPI_COMM_WORLD,&status);
    }
  }
  return(0);
}

/******************************************************************************/
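/* PCTFS_ssgl_radd: staged scalar segmented global add.  segs[0..level] holds
   segment offsets into vals; at stage i only ranks whose lowest i bits are
   zero participate, folding entries segs[i] .. segs[level]-1 across partner
   edge_node[i].  The second loop pushes the accumulated sums back out over
   the same edges in reverse order. */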
PetscErrorCode PCTFS_ssgl_radd(PetscScalar *vals, PetscScalar *work, PetscInt level, PetscInt *segs)
{
  PetscInt   edge, type, dest, mask;
  PetscInt   stage_n;
  MPI_Status status;

  /* check to make sure comm package has been initialized */
  if (!p_init) PCTFS_comm_init();

  /* all msgs are *NOT* the same length */
  /* implement the mesh fan in/out exchange algorithm */
  for (mask=0, edge=0; edge<level; edge++, mask++) {
    stage_n = (segs[level] - segs[edge]);
    if (stage_n && !(PCTFS_my_id & mask)) {
      dest = edge_node[edge];
      type = MSGTAG3 + PCTFS_my_id + (PCTFS_num_nodes*edge);
      if (PCTFS_my_id>dest) {
        MPI_Send(vals+segs[edge],stage_n,MPIU_SCALAR,dest,type,MPI_COMM_WORLD);
      } else {
        type = type - PCTFS_my_id + dest;
        MPI_Recv(work,stage_n,MPIU_SCALAR,MPI_ANY_SOURCE,type,MPI_COMM_WORLD,&status);
        PCTFS_rvec_add(vals+segs[edge], work, stage_n);
      }
    }
    mask <<= 1;
  }
  mask >>= 1;
  for (edge=0; edge<level; edge++) {
    stage_n = (segs[level] - segs[level-1-edge]);
    if (stage_n && !(PCTFS_my_id & mask)) {
      dest = edge_node[level-edge-1];
      type = MSGTAG6 + PCTFS_my_id + (PCTFS_num_nodes*edge);
      if (PCTFS_my_id<dest) {
        MPI_Send(vals+segs[level-1-edge],stage_n,MPIU_SCALAR,dest,type,MPI_COMM_WORLD);
      } else {
        type = type - PCTFS_my_id + dest;
        MPI_Recv(vals+segs[level-1-edge],stage_n,MPIU_SCALAR,MPI_ANY_SOURCE,type,MPI_COMM_WORLD,&status);
      }
    }
    mask >>= 1;
  }
  return(0);
}

/***********************************comm.c*************************************/
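/* PCTFS_giop_hc: integer counterpart of PCTFS_grop_hc(); combines vals over
   the lowest dim hypercube dimensions using a function obtained from
   PCTFS_ivec_fct_addr().  Also requires a power-of-two number of ranks. */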
PetscErrorCode PCTFS_giop_hc(PetscInt *vals, PetscInt *work, PetscInt n, PetscInt *oprs, PetscInt dim)
{
  PetscInt   mask, edge;
  PetscInt   type, dest;
  vfp        fp;
  MPI_Status status;

  /* ok ... should have some data, work, and operator(s) */
  if (!vals||!work||!oprs) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop_hc() :: vals=%D, work=%D, oprs=%D",vals,work,oprs);

  /* non-uniform should have at least two entries */
  if ((oprs[0] == NON_UNIFORM)&&(n<2)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop_hc() :: non_uniform and n=0,1?");

  /* check to make sure comm package has been initialized */
  if (!p_init) PCTFS_comm_init();

  /* if there's nothing to do return */
  if ((PCTFS_num_nodes<2)||(!n)||(dim<=0)) return(0);

  /* the error msg says it all!!! */
  if (modfl_num_nodes) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop_hc() :: PCTFS_num_nodes not a power of 2!?!");

  /* a negative number of items to send ==> fatal */
  if (n<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop_hc() :: n=%D<0?",n);

  /* can't do more dimensions than exist */
  dim = PetscMin(dim,PCTFS_i_log2_num_nodes);

  /* advance to list of n operations for custom */
  if ((type=oprs[0])==NON_UNIFORM) oprs++;

  if (!(fp = (vfp) PCTFS_ivec_fct_addr(type))) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCTFS_giop_hc() :: Could not retrieve function pointer!\n");

  for (mask=1,edge=0; edge<dim; edge++,mask<<=1) {
    dest = PCTFS_my_id^mask;
    if (PCTFS_my_id > dest) {
      MPI_Send(vals,n,MPIU_INT,dest,MSGTAG2+PCTFS_my_id,MPI_COMM_WORLD);
    } else {
      MPI_Recv(work,n,MPIU_INT,MPI_ANY_SOURCE,MSGTAG2+dest,MPI_COMM_WORLD,&status);
      (*fp)(vals, work, n, oprs);
    }
  }

  if (edge==dim) mask>>=1;
  else {
    while (++edge<dim) mask<<=1;
  }

  for (edge=0; edge<dim; edge++,mask>>=1) {
    if (PCTFS_my_id%mask) continue;

    dest = PCTFS_my_id^mask;
    if (PCTFS_my_id < dest) {
      MPI_Send(vals,n,MPIU_INT,dest,MSGTAG4+PCTFS_my_id,MPI_COMM_WORLD);
    } else {
      MPI_Recv(vals,n,MPIU_INT,MPI_ANY_SOURCE,MSGTAG4+dest,MPI_COMM_WORLD,&status);
    }
  }
  return(0);
}