Actual source code: mpi.c
petsc-3.13.6 2020-09-29
/*
      This provides a few of the MPI-uni functions that cannot be implemented
   with C macros
*/
#include <petscsys.h>

#if !defined(MPIUNI_H)
#error "Wrong mpi.h included! require mpi.h from MPIUNI"
#endif

#define MPI_SUCCESS 0
#define MPI_FAILURE 1

void *MPIUNI_TMP = NULL;
/*
   With MPI Uni there are exactly four distinct communicators:
   MPI_COMM_SELF, MPI_COMM_WORLD, and a MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator).

   MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc
   the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
*/
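/*
   Editor's illustration (a sketch, not part of the PETSc source): an MPIUNI
   MPI_Comm is just a small integer. 0 is comm_null, 1 is MPI_COMM_SELF, 2 is
   MPI_COMM_WORLD, and values >= 3 are handed out by MPI_Comm_dup() and
   MPI_Comm_create() below.

     MPI_Comm dup;
     MPI_Comm_dup(MPI_COMM_WORLD,&dup);   dup receives the first free slot >= 3
     MPI_Comm_free(&dup);                 the slot becomes reusable and dup is set to 0
*/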
#define MAX_ATTR 256
#define MAX_COMM 128

typedef struct {
  void *attribute_val;
  int  active;
} MPI_Attr;

typedef struct {
  void                *extra_state;
  MPI_Delete_function *del;
  int                 active;  /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
static int             comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
static int             mpi_tag_ub = 100000000;
static int             num_attr   = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int             MaxComm    = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void            *MPIUNIF_mpi_in_place = 0;

#define CommIdx(comm) ((comm)-1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */

#if defined(__cplusplus)
extern "C" {
#endif
/*
   To avoid problems with prototypes for the system memcpy(), it is duplicated here
*/
int MPIUNI_Memcpy(void *a,const void *b,int n)
{
  int  i;
  char *aa = (char*)a;
  char *bb = (char*)b;

  if (a == MPI_IN_PLACE || a == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (b == MPI_IN_PLACE || b == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  for (i=0; i<n; i++) aa[i] = bb[i];
  return MPI_SUCCESS;
}
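/*
   Editor's illustration (a sketch, not part of the PETSc source): on a single
   process the MPIUNI collectives reduce to this copy (MPI_Allreduce is assumed
   here to forward to MPIUNI_Memcpy, just as petsc_mpi_scan_() below calls it
   explicitly), and MPI_IN_PLACE, in either its C form or its Fortran form
   captured in MPIUNIF_mpi_in_place, makes the copy a no-op because the send
   and receive buffers already coincide.

     int in = 7, out = 0;
     MPI_Allreduce(&in,&out,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);          out becomes 7
     MPI_Allreduce(MPI_IN_PLACE,&out,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); out left untouched
*/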
static int classcnt = 0;
static int codecnt  = 0;

int MPI_Add_error_class(int *cl)
{
  *cl = classcnt++;
  return MPI_SUCCESS;
}

int MPI_Add_error_code(int cl,int *co)
{
  if (cl >= classcnt) return MPI_FAILURE;
  *co = codecnt++;
  return MPI_SUCCESS;
}
int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
{
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED:
    *num_integers  = 0;
    *num_addresses = 0;
    *num_datatypes = 0;
    *combiner      = comb;
    break;
  case MPI_COMBINER_DUP:
    *num_integers  = 0;
    *num_addresses = 0;
    *num_datatypes = 1;
    *combiner      = comb;
    break;
  case MPI_COMBINER_CONTIGUOUS:
    *num_integers  = 1;
    *num_addresses = 0;
    *num_datatypes = 1;
    *combiner      = comb;
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF,1);
  }
  return MPI_SUCCESS;
}
int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
{
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED:
    return MPIUni_Abort(MPI_COMM_SELF,1);
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
    array_of_datatypes[0] = datatype & 0x0fffffff;
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
    array_of_integers[0]  = (datatype >> 8) & 0xfff;         /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF,1);
  }
  return MPI_SUCCESS;
}
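/*
   Editor's note (derived from the two routines above, not part of the PETSc
   source): MPIUNI packs a datatype's description into its integer value:

     bits 31..28          combiner (MPI_COMBINER_NAMED, _DUP, or _CONTIGUOUS)
     bits 19..8           element count, for contiguous types
     bits 27..20, 7..0    basic named type information

   Hence for a contiguous datatype t, (t >> 28) recovers the combiner,
   ((t >> 8) & 0xfff) recovers the count, and ((t & 0x0ff000ff) | 0x100) is
   the basic type with count 1 from which t was built.
*/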
/*
   Used to set the built-in MPI_TAG_UB attribute
*/
static int Keyval_setup(void)
{
  attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
  attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
  attr[CommIdx(MPI_COMM_SELF)][0].active         = 1;
  attr[CommIdx(MPI_COMM_SELF)][0].attribute_val  = &mpi_tag_ub;
  attr_keyval[0].active                          = 1;
  return MPI_SUCCESS;
}
int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
{
  int i,keyid;

  for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
    if (!attr_keyval[i].active) {
      keyid = i;
      goto found;
    }
  }
  if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
  keyid = num_attr++;

found:
  attr_keyval[keyid].extra_state = extra_state;
  attr_keyval[keyid].del         = delete_fn;
  attr_keyval[keyid].active      = 1;
  *keyval = keyid;
  return MPI_SUCCESS;
}
int MPI_Comm_free_keyval(int *keyval)
{
  attr_keyval[*keyval].extra_state = 0;
  attr_keyval[*keyval].del         = 0;
  attr_keyval[*keyval].active      = 0;
  *keyval = 0;
  return MPI_SUCCESS;
}
int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  attr[idx][keyval].active        = 1;
  attr[idx][keyval].attribute_val = attribute_val;
  return MPI_SUCCESS;
}

int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (attr[idx][keyval].active && attr_keyval[keyval].del) {
    void *save_attribute_val        = attr[idx][keyval].attribute_val;
    attr[idx][keyval].active        = 0;
    attr[idx][keyval].attribute_val = 0;
    (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
  }
  return MPI_SUCCESS;
}

int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!keyval) Keyval_setup(); /* lazily install the predefined MPI_TAG_UB attribute (keyval 0) */
  *flag                  = attr[idx][keyval].active;
  *(void**)attribute_val = attr[idx][keyval].attribute_val;
  return MPI_SUCCESS;
}
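/*
   Editor's illustration (a sketch, not part of the PETSc source): keyval 0 is
   reserved for the predefined MPI_TAG_UB attribute, which Keyval_setup()
   installs lazily on the first query, so a standard attribute lookup works:

     void *val;
     int  flag;
     MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&val,&flag);
     if (flag) printf("tag upper bound: %d\n",*(int*)val);   prints 100000000 here
*/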
int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
{
  int j;
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  for (j=3; j<=MaxComm; j++) { /* reuse an inactive slot among the previously created communicators */
    if (!comm_active[CommIdx(j)]) {
      comm_active[CommIdx(j)] = 1;
      *newcomm = j;
      return MPI_SUCCESS;
    }
  }
  if (MaxComm >= MAX_COMM) return MPI_FAILURE;
  *newcomm = ++MaxComm;
  comm_active[CommIdx(*newcomm)] = 1;
  return MPI_SUCCESS;
}

int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
{
  int j;
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  for (j=3; j<=MaxComm; j++) {
    if (!comm_active[CommIdx(j)]) {
      comm_active[CommIdx(j)] = 1;
      *out = j;
      return MPI_SUCCESS;
    }
  }
  if (MaxComm >= MAX_COMM) return MPI_FAILURE;
  *out = ++MaxComm;
  comm_active[CommIdx(*out)] = 1;
  return MPI_SUCCESS;
}
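/*
   Editor's illustration (a sketch, not part of the PETSc source): both routines
   above hand out communicator numbers starting at 3, recycling a freed slot
   before growing MaxComm (which is capped at MAX_COMM):

     MPI_Comm a,b;
     MPI_Comm_dup(MPI_COMM_WORLD,&a);   in a fresh program a == 3
     MPI_Comm_free(&a);                 slot 3 becomes inactive
     MPI_Comm_dup(MPI_COMM_SELF,&b);    b == 3 again; the slot was recycled
*/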
int MPI_Comm_free(MPI_Comm *comm)
{
  int i;
  int idx = CommIdx(*comm);

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (i=0; i<num_attr; i++) {
    /* run the delete callback for every attribute still attached to this communicator */
    if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
    attr[idx][i].active        = 0;
    attr[idx][i].attribute_val = 0;
  }
  if (*comm >= 3) comm_active[idx] = 0; /* the predefined communicators (1 and 2) are never marked inactive */
  *comm = 0;
  return MPI_SUCCESS;
}
int MPI_Comm_size(MPI_Comm comm,int *size)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  *size = 1;
  return MPI_SUCCESS;
}

int MPI_Comm_rank(MPI_Comm comm,int *rank)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  *rank = 0;
  return MPI_SUCCESS;
}
int MPIUni_Abort(MPI_Comm comm,int errorcode)
{
  printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
  return MPI_FAILURE;
}

int MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();
  return MPI_SUCCESS;
}
/* --------------------------------------------------------------------------*/

static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;

int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized) return MPI_FAILURE;
  if (MPI_was_finalized) return MPI_FAILURE; /* MPI standard: once MPI_FINALIZE returns, no MPI routine (not even MPI_INIT) may be called, except ... */
  MPI_was_initialized = 1;
  return MPI_SUCCESS;
}

int MPI_Finalize(void)
{
  MPI_Comm comm;
  if (MPI_was_finalized) return MPI_FAILURE;
  if (!MPI_was_initialized) return MPI_FAILURE;
  comm = MPI_COMM_WORLD;
  MPI_Comm_free(&comm);
  comm = MPI_COMM_SELF;
  MPI_Comm_free(&comm);
#if defined(PETSC_USE_DEBUG)
  {
    int i;
    for (i=3; i<=MaxComm; i++) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n",i);
    }
  }
#endif
  /* reset counters */
  MaxComm  = 2;
  num_attr = 1;
  MPI_was_finalized = 1;
  return MPI_SUCCESS;
}

int MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return MPI_SUCCESS;
}

int MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return MPI_SUCCESS;
}
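/*
   Editor's illustration (a sketch, not part of the PETSc source): the two flags
   above make initialization strictly one-shot, as the MPI standard requires:

     int flag;
     MPI_Init(NULL,NULL);       returns MPI_SUCCESS
     MPI_Init(NULL,NULL);       returns MPI_FAILURE; already initialized
     MPI_Finalize();
     MPI_Finalized(&flag);      flag == 1
     MPI_Init(NULL,NULL);       returns MPI_FAILURE; no re-initialization after finalize
*/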
/* ------------------- Fortran versions of several routines ------------------ */

#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define mpiunisetmoduleblock_ MPIUNISETMODULEBLOCK
#define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
#define petsc_mpi_init_ PETSC_MPI_INIT
#define petsc_mpi_finalize_ PETSC_MPI_FINALIZE
#define petsc_mpi_comm_size_ PETSC_MPI_COMM_SIZE
#define petsc_mpi_comm_rank_ PETSC_MPI_COMM_RANK
#define petsc_mpi_abort_ PETSC_MPI_ABORT
#define petsc_mpi_reduce_ PETSC_MPI_REDUCE
#define petsc_mpi_allreduce_ PETSC_MPI_ALLREDUCE
#define petsc_mpi_barrier_ PETSC_MPI_BARRIER
#define petsc_mpi_bcast_ PETSC_MPI_BCAST
#define petsc_mpi_gather_ PETSC_MPI_GATHER
#define petsc_mpi_allgather_ PETSC_MPI_ALLGATHER
#define petsc_mpi_comm_split_ PETSC_MPI_COMM_SPLIT
#define petsc_mpi_scan_ PETSC_MPI_SCAN
#define petsc_mpi_send_ PETSC_MPI_SEND
#define petsc_mpi_recv_ PETSC_MPI_RECV
#define petsc_mpi_reduce_scatter_ PETSC_MPI_REDUCE_SCATTER
#define petsc_mpi_irecv_ PETSC_MPI_IRECV
#define petsc_mpi_isend_ PETSC_MPI_ISEND
#define petsc_mpi_sendrecv_ PETSC_MPI_SENDRECV
#define petsc_mpi_test_ PETSC_MPI_TEST
#define petsc_mpi_waitall_ PETSC_MPI_WAITALL
#define petsc_mpi_waitany_ PETSC_MPI_WAITANY
#define petsc_mpi_allgatherv_ PETSC_MPI_ALLGATHERV
#define petsc_mpi_alltoallv_ PETSC_MPI_ALLTOALLV
#define petsc_mpi_comm_create_ PETSC_MPI_COMM_CREATE
#define petsc_mpi_address_ PETSC_MPI_ADDRESS
#define petsc_mpi_pack_ PETSC_MPI_PACK
#define petsc_mpi_unpack_ PETSC_MPI_UNPACK
#define petsc_mpi_pack_size_ PETSC_MPI_PACK_SIZE
#define petsc_mpi_type_struct_ PETSC_MPI_TYPE_STRUCT
#define petsc_mpi_type_commit_ PETSC_MPI_TYPE_COMMIT
#define petsc_mpi_wtime_ PETSC_MPI_WTIME
#define petsc_mpi_cancel_ PETSC_MPI_CANCEL
#define petsc_mpi_comm_dup_ PETSC_MPI_COMM_DUP
#define petsc_mpi_comm_free_ PETSC_MPI_COMM_FREE
#define petsc_mpi_get_count_ PETSC_MPI_GET_COUNT
#define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
#define petsc_mpi_initialized_ PETSC_MPI_INITIALIZED
#define petsc_mpi_iprobe_ PETSC_MPI_IPROBE
#define petsc_mpi_probe_ PETSC_MPI_PROBE
#define petsc_mpi_request_free_ PETSC_MPI_REQUEST_FREE
#define petsc_mpi_ssend_ PETSC_MPI_SSEND
#define petsc_mpi_wait_ PETSC_MPI_WAIT
#define petsc_mpi_comm_group_ PETSC_MPI_COMM_GROUP
#define petsc_mpi_exscan_ PETSC_MPI_EXSCAN
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define mpiunisetmoduleblock_ mpiunisetmoduleblock
#define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
#define petsc_mpi_init_ petsc_mpi_init
#define petsc_mpi_finalize_ petsc_mpi_finalize
#define petsc_mpi_comm_size_ petsc_mpi_comm_size
#define petsc_mpi_comm_rank_ petsc_mpi_comm_rank
#define petsc_mpi_abort_ petsc_mpi_abort
#define petsc_mpi_reduce_ petsc_mpi_reduce
#define petsc_mpi_allreduce_ petsc_mpi_allreduce
#define petsc_mpi_barrier_ petsc_mpi_barrier
#define petsc_mpi_bcast_ petsc_mpi_bcast
#define petsc_mpi_gather_ petsc_mpi_gather
#define petsc_mpi_allgather_ petsc_mpi_allgather
#define petsc_mpi_comm_split_ petsc_mpi_comm_split
#define petsc_mpi_scan_ petsc_mpi_scan
#define petsc_mpi_send_ petsc_mpi_send
#define petsc_mpi_recv_ petsc_mpi_recv
#define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter
#define petsc_mpi_irecv_ petsc_mpi_irecv
#define petsc_mpi_isend_ petsc_mpi_isend
#define petsc_mpi_sendrecv_ petsc_mpi_sendrecv
#define petsc_mpi_test_ petsc_mpi_test
#define petsc_mpi_waitall_ petsc_mpi_waitall
#define petsc_mpi_waitany_ petsc_mpi_waitany
#define petsc_mpi_allgatherv_ petsc_mpi_allgatherv
#define petsc_mpi_alltoallv_ petsc_mpi_alltoallv
#define petsc_mpi_comm_create_ petsc_mpi_comm_create
#define petsc_mpi_address_ petsc_mpi_address
#define petsc_mpi_pack_ petsc_mpi_pack
#define petsc_mpi_unpack_ petsc_mpi_unpack
#define petsc_mpi_pack_size_ petsc_mpi_pack_size
#define petsc_mpi_type_struct_ petsc_mpi_type_struct
#define petsc_mpi_type_commit_ petsc_mpi_type_commit
#define petsc_mpi_wtime_ petsc_mpi_wtime
#define petsc_mpi_cancel_ petsc_mpi_cancel
#define petsc_mpi_comm_dup_ petsc_mpi_comm_dup
#define petsc_mpi_comm_free_ petsc_mpi_comm_free
#define petsc_mpi_get_count_ petsc_mpi_get_count
#define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
#define petsc_mpi_initialized_ petsc_mpi_initialized
#define petsc_mpi_iprobe_ petsc_mpi_iprobe
#define petsc_mpi_probe_ petsc_mpi_probe
#define petsc_mpi_request_free_ petsc_mpi_request_free
#define petsc_mpi_ssend_ petsc_mpi_ssend
#define petsc_mpi_wait_ petsc_mpi_wait
#define petsc_mpi_comm_group_ petsc_mpi_comm_group
#define petsc_mpi_exscan_ petsc_mpi_exscan
#endif
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define petsc_mpi_init_ petsc_mpi_init__
#define petsc_mpi_finalize_ petsc_mpi_finalize__
#define petsc_mpi_comm_size_ petsc_mpi_comm_size__
#define petsc_mpi_comm_rank_ petsc_mpi_comm_rank__
#define petsc_mpi_abort_ petsc_mpi_abort__
#define petsc_mpi_reduce_ petsc_mpi_reduce__
#define petsc_mpi_allreduce_ petsc_mpi_allreduce__
#define petsc_mpi_barrier_ petsc_mpi_barrier__
#define petsc_mpi_bcast_ petsc_mpi_bcast__
#define petsc_mpi_gather_ petsc_mpi_gather__
#define petsc_mpi_allgather_ petsc_mpi_allgather__
#define petsc_mpi_comm_split_ petsc_mpi_comm_split__
#define petsc_mpi_scan_ petsc_mpi_scan__
#define petsc_mpi_send_ petsc_mpi_send__
#define petsc_mpi_recv_ petsc_mpi_recv__
#define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter__
#define petsc_mpi_irecv_ petsc_mpi_irecv__
#define petsc_mpi_isend_ petsc_mpi_isend__
#define petsc_mpi_sendrecv_ petsc_mpi_sendrecv__
#define petsc_mpi_test_ petsc_mpi_test__
#define petsc_mpi_waitall_ petsc_mpi_waitall__
#define petsc_mpi_waitany_ petsc_mpi_waitany__
#define petsc_mpi_allgatherv_ petsc_mpi_allgatherv__
#define petsc_mpi_alltoallv_ petsc_mpi_alltoallv__
#define petsc_mpi_comm_create_ petsc_mpi_comm_create__
#define petsc_mpi_address_ petsc_mpi_address__
#define petsc_mpi_pack_ petsc_mpi_pack__
#define petsc_mpi_unpack_ petsc_mpi_unpack__
#define petsc_mpi_pack_size_ petsc_mpi_pack_size__
#define petsc_mpi_type_struct_ petsc_mpi_type_struct__
#define petsc_mpi_type_commit_ petsc_mpi_type_commit__
#define petsc_mpi_wtime_ petsc_mpi_wtime__
#define petsc_mpi_cancel_ petsc_mpi_cancel__
#define petsc_mpi_comm_dup_ petsc_mpi_comm_dup__
#define petsc_mpi_comm_free_ petsc_mpi_comm_free__
#define petsc_mpi_get_count_ petsc_mpi_get_count__
#define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
#define petsc_mpi_initialized_ petsc_mpi_initialized__
#define petsc_mpi_iprobe_ petsc_mpi_iprobe__
#define petsc_mpi_probe_ petsc_mpi_probe__
#define petsc_mpi_request_free_ petsc_mpi_request_free__
#define petsc_mpi_ssend_ petsc_mpi_ssend__
#define petsc_mpi_wait_ petsc_mpi_wait__
#define petsc_mpi_comm_group_ petsc_mpi_comm_group__
#define petsc_mpi_exscan_ petsc_mpi_exscan__
#endif
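/*
   Editor's note (not part of the PETSc source): the blocks above remap each C
   wrapper name to the symbol the Fortran compiler emits. For petsc_mpi_init_,
   for example:

     PETSC_HAVE_FORTRAN_CAPS                   PETSC_MPI_INIT
     no trailing underscore                    petsc_mpi_init
     PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE  petsc_mpi_init__
     default (single trailing underscore)      petsc_mpi_init_ (unchanged)

   so a Fortran reference to petsc_mpi_init resolves to the wrapper defined
   below regardless of the compiler's name-mangling convention.
*/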
/* Do not build the Fortran interface if MPI namespace collision is to be avoided */
#if defined(PETSC_HAVE_FORTRAN)

PETSC_EXTERN void mpiunisetmoduleblock_(void);

PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}
PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int*)0,(char***)0);
}

PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}

PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = 0;
}

PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key,MPI_Comm *newcomm,int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS;
}
PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
{
  *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
}

PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype,void *recvbuf,int *rcount,int *rdatatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
}

PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype,void *recvbuf,int *rcount,int *rdatatype,int *comm,int *ierr)
{
  *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
}

PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
}
PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *request,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
}

PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int *index,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
}
PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
{
  *address = (MPI_Aint)((char*)location);
  *ierr    = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

double petsc_mpi_wtime_(void)
{
  return 0.0;
}
PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
}

PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
#endif /* PETSC_HAVE_FORTRAN */

#if defined(__cplusplus)
}
#endif