Actual source code: mpi.c
petsc-3.3-p7 2013-05-11
/*
   This provides a few of the MPIUNI functions that cannot be implemented
   with C macros.
*/
#include <mpiuni/mpi.h>
#if !defined(__MPIUNI_H)    /* __MPIUNI_H is the include guard defined by mpiuni/mpi.h */
#error "Wrong mpi.h included! require mpi.h from MPIUNI"
#endif
#if !defined(PETSC_STDCALL)
#define PETSC_STDCALL
#endif
#include <stdio.h>
#if defined(PETSC_HAVE_STDLIB_H)
#include <stdlib.h>
#endif

#define MPI_SUCCESS 0
#define MPI_FAILURE 1
void *MPIUNI_TMP = 0;
int  MPIUNI_DATASIZE[10] = {sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char),2*sizeof(int),4*sizeof(double),4,8,2*sizeof(double)};
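
/*
   MPIUNI_DATASIZE maps the small integer datatype codes used by the Fortran
   stubs below to the size in bytes of one element of that type; the collective
   stubs use it to compute how many bytes to copy from the send buffer into the
   receive buffer.
*/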
/*
   With MPIUNI there is only one communicator; it is numbered 1.
*/
#define MAX_ATTR 128

typedef struct {
  void                *extra_state;
  void                *attribute_val;
  int                 active;
  MPI_Delete_function *del;
} MPI_Attr;

static MPI_Attr attr[MAX_ATTR];
static int      num_attr = 1,mpi_tag_ub = 100000000;
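
/*
   Attribute slot 0 is reserved for the built-in MPI_TAG_UB key and is filled
   in lazily by Keyval_setup() the first time MPI_Attr_get() is called with
   keyval 0; user keyvals therefore start at 1 (num_attr is initialized to 1).
*/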
#if defined(__cplusplus)
extern "C" {
#endif
/*
   To avoid problems with the prototype for the system memcpy(), a private
   copy is used here.
*/
int MPIUNI_Memcpy(void *a,const void *b,int n)
{
  int        i;
  char       *aa = (char*)a;
  const char *bb = (const char*)b;

  if (b == MPI_IN_PLACE) return 0;
  for (i=0; i<n; i++) aa[i] = bb[i];
  return 0;
}
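
/*
   Because MPIUNI_Memcpy() returns immediately when the source buffer is
   MPI_IN_PLACE, the reduction and gather stubs below may be handed
   MPI_IN_PLACE as the send buffer and the copy simply becomes a no-op.
*/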
/*
   Used to set the built-in MPI_TAG_UB attribute
*/
static int Keyval_setup(void)
{
  attr[0].active        = 1;
  attr[0].attribute_val = &mpi_tag_ub;
  return 0;
}
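
/*
   The routines below implement just enough of the MPI-1 attribute caching
   interface for PETSc: MPI_Keyval_create() hands out a slot in attr[],
   MPI_Attr_put()/MPI_Attr_get() store and retrieve a single pointer per slot,
   and MPI_Attr_delete()/MPI_Comm_free() invoke any registered delete
   callback.  A minimal usage sketch (mydata is a hypothetical user pointer):

       int   keyval,flag;
       void *val;
       MPI_Keyval_create(0,0,&keyval,0);
       MPI_Attr_put(MPI_COMM_WORLD,keyval,mydata);
       MPI_Attr_get(MPI_COMM_WORLD,keyval,&val,&flag);
*/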
int MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
{
  if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);

  attr[num_attr].extra_state = extra_state;
  attr[num_attr].del         = delete_fn;
  *keyval                    = num_attr++;
  return 0;
}

int MPI_Keyval_free(int *keyval)
{
  attr[*keyval].active = 0;
  return MPI_SUCCESS;
}
int MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
{
  attr[keyval].active        = 1;
  attr[keyval].attribute_val = attribute_val;
  return MPI_SUCCESS;
}

int MPI_Attr_delete(MPI_Comm comm,int keyval)
{
  if (attr[keyval].active && attr[keyval].del) {
    void *save_attribute_val   = attr[keyval].attribute_val;
    attr[keyval].active        = 0;
    attr[keyval].attribute_val = 0;
    (*(attr[keyval].del))(comm,keyval,save_attribute_val,attr[keyval].extra_state);
  }
  return MPI_SUCCESS;
}
int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
{
  if (!keyval) Keyval_setup();
  *flag                   = attr[keyval].active;
  *(void **)attribute_val = attr[keyval].attribute_val;
  return MPI_SUCCESS;
}
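
/*
   The single MPIUNI communicator is reference counted through dups:
   MPI_Comm_create() and MPI_Comm_dup() increment the count, and
   MPI_Comm_free() runs the registered attribute delete callbacks only when
   the last reference is released.
*/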
static int dups = 0;
int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
{
  dups++;
  *newcomm = comm;
  return MPI_SUCCESS;
}

int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
{
  *out = comm;
  dups++;
  return 0;
}

int MPI_Comm_free(MPI_Comm *comm)
{
  int i;

  if (--dups) return MPI_SUCCESS;
  for (i=0; i<num_attr; i++) {
    if (attr[i].active && attr[i].del) {
      (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
    }
    attr[i].active = 0;
  }
  return MPI_SUCCESS;
}
int MPI_Comm_size(MPI_Comm comm,int *size)
{
  *size = 1;
  return MPI_SUCCESS;
}

int MPI_Comm_rank(MPI_Comm comm,int *rank)
{
  *rank = 0;
  return MPI_SUCCESS;
}

int MPI_Abort(MPI_Comm comm,int errorcode)
{
  abort();
  return MPI_SUCCESS;
}
/* -------------------------------------------------------------------------- */

static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;
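
/*
   MPI_Init() and MPI_Finalize() may each be called at most once; calling them
   out of order, or a second time, returns a nonzero error code rather than
   aborting.  A serial code linked against MPIUNI can therefore use the usual
   sequence (sketch):

       MPI_Init(&argc,&argv);
       MPI_Comm_size(MPI_COMM_WORLD,&size);     (size is always 1)
       ...
       MPI_Finalize();
*/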
int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized) return 1;
  if (MPI_was_finalized) return 1;
  MPI_was_initialized = 1;
  return 0;
}

int MPI_Finalize(void)
{
  if (MPI_was_finalized) return 1;
  if (!MPI_was_initialized) return 1;
  MPI_was_finalized = 1;
  return 0;
}

int MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return 0;
}

int MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return 0;
}
/* ------------------- Fortran versions of several routines ------------------ */
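
/*
   The macros below map the Fortran binding names onto the name-mangling
   convention of the Fortran compiler PETSc was configured with: all upper
   case, no trailing underscore, or a double trailing underscore.
*/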
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define mpi_init_ MPI_INIT
#define mpi_finalize_ MPI_FINALIZE
#define mpi_comm_size_ MPI_COMM_SIZE
#define mpi_comm_rank_ MPI_COMM_RANK
#define mpi_abort_ MPI_ABORT
#define mpi_reduce_ MPI_REDUCE
#define mpi_allreduce_ MPI_ALLREDUCE
#define mpi_barrier_ MPI_BARRIER
#define mpi_bcast_ MPI_BCAST
#define mpi_gather_ MPI_GATHER
#define mpi_allgather_ MPI_ALLGATHER
#define mpi_comm_split_ MPI_COMM_SPLIT
#define mpi_scan_ MPI_SCAN
#define mpi_send_ MPI_SEND
#define mpi_recv_ MPI_RECV
#define mpi_reduce_scatter_ MPI_REDUCE_SCATTER
#define mpi_irecv_ MPI_IRECV
#define mpi_isend_ MPI_ISEND
#define mpi_sendrecv_ MPI_SENDRECV
#define mpi_test_ MPI_TEST
#define mpi_waitall_ MPI_WAITALL
#define mpi_waitany_ MPI_WAITANY
#define mpi_allgatherv_ MPI_ALLGATHERV
#define mpi_alltoallv_ MPI_ALLTOALLV
#define mpi_comm_create_ MPI_COMM_CREATE
#define mpi_address_ MPI_ADDRESS
#define mpi_pack_ MPI_PACK
#define mpi_unpack_ MPI_UNPACK
#define mpi_pack_size_ MPI_PACK_SIZE
#define mpi_type_struct_ MPI_TYPE_STRUCT
#define mpi_type_commit_ MPI_TYPE_COMMIT
#define mpi_wtime_ MPI_WTIME
#define mpi_cancel_ MPI_CANCEL
#define mpi_comm_dup_ MPI_COMM_DUP
#define mpi_comm_free_ MPI_COMM_FREE
#define mpi_get_count_ MPI_GET_COUNT
#define mpi_get_processor_name_ MPI_GET_PROCESSOR_NAME
#define mpi_initialized_ MPI_INITIALIZED
#define mpi_iprobe_ MPI_IPROBE
#define mpi_probe_ MPI_PROBE
#define mpi_request_free_ MPI_REQUEST_FREE
#define mpi_ssend_ MPI_SSEND
#define mpi_wait_ MPI_WAIT
#define mpi_comm_group_ MPI_COMM_GROUP
#define mpi_exscan_ MPI_EXSCAN
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define mpi_init_ mpi_init
#define mpi_finalize_ mpi_finalize
#define mpi_comm_size_ mpi_comm_size
#define mpi_comm_rank_ mpi_comm_rank
#define mpi_abort_ mpi_abort
#define mpi_reduce_ mpi_reduce
#define mpi_allreduce_ mpi_allreduce
#define mpi_barrier_ mpi_barrier
#define mpi_bcast_ mpi_bcast
#define mpi_gather_ mpi_gather
#define mpi_allgather_ mpi_allgather
#define mpi_comm_split_ mpi_comm_split
#define mpi_scan_ mpi_scan
#define mpi_send_ mpi_send
#define mpi_recv_ mpi_recv
#define mpi_reduce_scatter_ mpi_reduce_scatter
#define mpi_irecv_ mpi_irecv
#define mpi_isend_ mpi_isend
#define mpi_sendrecv_ mpi_sendrecv
#define mpi_test_ mpi_test
#define mpi_waitall_ mpi_waitall
#define mpi_waitany_ mpi_waitany
#define mpi_allgatherv_ mpi_allgatherv
#define mpi_alltoallv_ mpi_alltoallv
#define mpi_comm_create_ mpi_comm_create
#define mpi_address_ mpi_address
#define mpi_pack_ mpi_pack
#define mpi_unpack_ mpi_unpack
#define mpi_pack_size_ mpi_pack_size
#define mpi_type_struct_ mpi_type_struct
#define mpi_type_commit_ mpi_type_commit
#define mpi_wtime_ mpi_wtime
#define mpi_cancel_ mpi_cancel
#define mpi_comm_dup_ mpi_comm_dup
#define mpi_comm_free_ mpi_comm_free
#define mpi_get_count_ mpi_get_count
#define mpi_get_processor_name_ mpi_get_processor_name
#define mpi_initialized_ mpi_initialized
#define mpi_iprobe_ mpi_iprobe
#define mpi_probe_ mpi_probe
#define mpi_request_free_ mpi_request_free
#define mpi_ssend_ mpi_ssend
#define mpi_wait_ mpi_wait
#define mpi_comm_group_ mpi_comm_group
#define mpi_exscan_ mpi_exscan
#endif
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_ mpi_init__
#define mpi_finalize_ mpi_finalize__
#define mpi_comm_size_ mpi_comm_size__
#define mpi_comm_rank_ mpi_comm_rank__
#define mpi_abort_ mpi_abort__
#define mpi_reduce_ mpi_reduce__
#define mpi_allreduce_ mpi_allreduce__
#define mpi_barrier_ mpi_barrier__
#define mpi_bcast_ mpi_bcast__
#define mpi_gather_ mpi_gather__
#define mpi_allgather_ mpi_allgather__
#define mpi_comm_split_ mpi_comm_split__
#define mpi_scan_ mpi_scan__
#define mpi_send_ mpi_send__
#define mpi_recv_ mpi_recv__
#define mpi_reduce_scatter_ mpi_reduce_scatter__
#define mpi_irecv_ mpi_irecv__
#define mpi_isend_ mpi_isend__
#define mpi_sendrecv_ mpi_sendrecv__
#define mpi_test_ mpi_test__
#define mpi_waitall_ mpi_waitall__
#define mpi_waitany_ mpi_waitany__
#define mpi_allgatherv_ mpi_allgatherv__
#define mpi_alltoallv_ mpi_alltoallv__
#define mpi_comm_create_ mpi_comm_create__
#define mpi_address_ mpi_address__
#define mpi_pack_ mpi_pack__
#define mpi_unpack_ mpi_unpack__
#define mpi_pack_size_ mpi_pack_size__
#define mpi_type_struct_ mpi_type_struct__
#define mpi_type_commit_ mpi_type_commit__
#define mpi_wtime_ mpi_wtime__
#define mpi_cancel_ mpi_cancel__
#define mpi_comm_dup_ mpi_comm_dup__
#define mpi_comm_free_ mpi_comm_free__
#define mpi_get_count_ mpi_get_count__
#define mpi_get_processor_name_ mpi_get_processor_name__
#define mpi_initialized_ mpi_initialized__
#define mpi_iprobe_ mpi_iprobe__
#define mpi_probe_ mpi_probe__
#define mpi_request_free_ mpi_request_free__
#define mpi_ssend_ mpi_ssend__
#define mpi_wait_ mpi_wait__
#define mpi_comm_group_ mpi_comm_group__
#define mpi_exscan_ mpi_exscan__
#endif
/* Do not build the Fortran interface if an MPI namespace collision is to be avoided */
#if !defined(MPIUNI_AVOID_MPI_NAMESPACE)
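
/*
   Fortran stubs: every argument is passed by reference and the error code is
   returned through the ierr argument.  Most collectives degenerate to a
   memcpy from the send buffer to the receive buffer (or to a no-op), while
   point-to-point and pack/unpack routines cannot be satisfied on a single
   process and simply call MPI_Abort().
*/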
void PETSC_STDCALL mpi_init_(int *ierr)
{
  *ierr = MPI_Init((int*)0,(char***)0);
}

void PETSC_STDCALL mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}
void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
{
  *size = 1;
  *ierr = MPI_SUCCESS;
}
void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key,MPI_Comm *newcomm,int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}
void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype,void *recvbuf,int *rcount,int *rdatatype,int *root,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype,void *recvbuf,int *rcount,int *rdatatype,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}
void PETSC_STDCALL mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}
void PETSC_STDCALL mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_irecv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *request,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPIUNI_DATASIZE[*sendtype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_test_(int *request,int *flag,int *status,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_waitany_(int *count,int *array_of_requests,int *index,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPIUNI_DATASIZE[*sendtype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
{
  MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcounts)*MPIUNI_DATASIZE[*sendtype]);
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
{
  *newcomm = *comm;
  *ierr    = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_address_(void *location,MPIUNI_INTPTR *address,int *ierr)
{
  *address = (MPIUNI_INTPTR)location;
  *ierr    = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}
void PETSC_STDCALL mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}
void PETSC_STDCALL mpi_type_commit_(int *datatype,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
double PETSC_STDCALL mpi_wtime_(void)
{
  return 0.0;
}
void PETSC_STDCALL mpi_cancel_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_comm_dup_(int *comm,int *out,int *ierr)
{
  *out  = *comm;
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_comm_free_(int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}
/* duplicated from fortranimpl.h */
#if defined(PETSC_HAVE_FORTRAN_MIXED_STR_ARG)
#define PETSC_MIXED_LEN(len) ,int len
#define PETSC_END_LEN(len)
#else
#define PETSC_MIXED_LEN(len)
#define PETSC_END_LEN(len) ,int len
#endif
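
/*
   PETSC_MIXED_LEN()/PETSC_END_LEN() hide where the Fortran compiler passes
   the hidden character-string length argument: some compilers place it
   immediately after the string argument, others append it at the end of the
   argument list.
*/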
void PETSC_STDCALL mpi_get_processor_name_(char *name PETSC_MIXED_LEN(len),int *result_len,int *ierr PETSC_END_LEN(len))
{
  MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
  *result_len = 9;
  *ierr       = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_initialized_(int *flag,int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}
void PETSC_STDCALL mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
void PETSC_STDCALL mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_request_free_(int *request,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
{
  *ierr = MPI_Abort(MPI_COMM_WORLD,0);
}

void PETSC_STDCALL mpi_wait_(int *request,int *status,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_comm_group_(int *comm,int *group,int *ierr)
{
  *ierr = MPI_SUCCESS;
}

void PETSC_STDCALL mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
{
  *ierr = MPI_SUCCESS;
}
#endif /* MPIUNI_AVOID_MPI_NAMESPACE */
#if defined(__cplusplus)
}
#endif