Actual source code: mpi.c

petsc-3.4.5 2014-06-29
  1: /*
  2:       This provides a few of the MPI-uni functions that cannot be implemented
  3:     with C macros
  4: */
  5: #include <mpiuni/mpi.h>
  6: #if !defined(__MPIUNI_H)
  7: #error "Wrong mpi.h included! require mpi.h from MPIUNI"
  8: #endif
  9: #if !defined(PETSC_STDCALL)
 10: #define PETSC_STDCALL
 11: #endif
 12: #include <stdio.h>
 13: #if defined(PETSC_HAVE_STDLIB_H)
 14: #include <stdlib.h>
 15: #endif

 17: #define MPI_SUCCESS 0
 18: #define MPI_FAILURE 1

 20: void *MPIUNI_TMP         = 0;
 21: int  MPIUNI_DATASIZE[10] = {sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char),2*sizeof(int),4*sizeof(double),4,8,2*sizeof(double)};
 22: /*
 23:        With MPI Uni there are exactly four distinct communicators:
 24:     MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

 26:     MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
 27:      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

 29: */
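
/*
     A minimal usage sketch of the communicator model described above (added
   here for illustration, not part of the original source; the guard macro
   MPIUNI_COMM_EXAMPLE is hypothetical and never defined, so this sketch is
   not compiled; MPI_Comm_dup() and MPI_Comm_free() are defined later in this
   file).
*/
#if defined(MPIUNI_COMM_EXAMPLE)
static void MPIUNI_Comm_example(void)
{
  MPI_Comm dup1,dup2;

  MPI_Comm_dup(MPI_COMM_WORLD,&dup1); /* returns the dup of MPI_COMM_WORLD (comm + 2)                         */
  MPI_Comm_dup(dup1,&dup2);           /* duplicate of a duplicate: same communicator, reference count bumped  */
  MPI_Comm_free(&dup2);               /* only decrements the reference count                                  */
  MPI_Comm_free(&dup1);               /* last reference: attributes are deleted and the handle is zeroed      */
}
#endif
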
 30: #define MAX_ATTR 128

 32: typedef struct {
 33:   void *attribute_val;
 34:   int  active;
 35: } MPI_Attr;

 37: typedef struct {
 38:   void                *extra_state;
 39:   MPI_Delete_function *del;
 40: } MPI_Attr_keyval;

 42: static MPI_Attr_keyval attr_keyval[MAX_ATTR];
 43: static MPI_Attr        attr[4][MAX_ATTR];
 44: static int             num_attr = 1,mpi_tag_ub = 100000000;

 46: #if defined(__cplusplus)
 47: extern "C" {
 48: #endif

 50: /*
 51:    To avoid problems with the prototype for the system memcpy(), it is duplicated here
 52: */
 53: int MPIUNI_Memcpy(void *a,const void *b,int n)
 54: {
 55:   int  i;
 56:   char *aa= (char*)a;
 57:   char *bb= (char*)b;

 59:   if (b == MPI_IN_PLACE) return 0;
 60:   for (i=0; i<n; i++) aa[i] = bb[i];
 61:   return 0;
 62: }
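
/*
     Sketch of how the MPI_IN_PLACE check above behaves (added for
   illustration, not part of the original source; MPIUNI_MEMCPY_EXAMPLE is a
   hypothetical guard macro that is never defined).
*/
#if defined(MPIUNI_MEMCPY_EXAMPLE)
static void MPIUNI_Memcpy_example(void)
{
  double x[2] = {1.0,2.0},y[2] = {0.0,0.0};

  MPIUNI_Memcpy(y,x,2*(int)sizeof(double));            /* a one-process "reduction" is just a copy of sendbuf */
  MPIUNI_Memcpy(y,MPI_IN_PLACE,2*(int)sizeof(double)); /* MPI_IN_PLACE send buffer: nothing is copied         */
}
#endif
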

 64: /*
 65:    Used to set the built-in MPI_TAG_UB attribute
 66: */
 67: static int Keyval_setup(void)
 68: {
 69:   attr[MPI_COMM_WORLD-1][0].active        = 1;
 70:   attr[MPI_COMM_WORLD-1][0].attribute_val = &mpi_tag_ub;
 71:   attr[MPI_COMM_SELF-1][0].active         = 1;
 72:   attr[MPI_COMM_SELF-1][0].attribute_val  = &mpi_tag_ub;
 73:   return 0;
 74: }

 76: int MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
 77: {
 78:   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);

 80:   attr_keyval[num_attr].extra_state = extra_state;
 81:   attr_keyval[num_attr].del         = delete_fn;
 82:   *keyval                           = num_attr++;
 83:   return 0;
 84: }

 86: int MPI_Keyval_free(int *keyval)
 87: {
 88:   attr_keyval[*keyval].extra_state = 0;
 89:   attr_keyval[*keyval].del         = 0;

 91:   *keyval = 0;
 92:   return MPI_SUCCESS;
 93: }

 95: int MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
 96: {
 97:   if (comm-1 < 0 || comm-1 > 3) return 1;
 98:   attr[comm-1][keyval].active        = 1;
 99:   attr[comm-1][keyval].attribute_val = attribute_val;
100:   return MPI_SUCCESS;
101: }

103: int MPI_Attr_delete(MPI_Comm comm,int keyval)
104: {
105:   if (comm-1 < 0 || comm-1 > 3) return 1;
106:   if (attr[comm-1][keyval].active && attr_keyval[keyval].del) {
107:     void *save_attribute_val = attr[comm-1][keyval].attribute_val;
108:     attr[comm-1][keyval].active        = 0;
109:     attr[comm-1][keyval].attribute_val = 0;
110:     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
111:   }
112:   return MPI_SUCCESS;
113: }

115: int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
116: {
117:   if (comm-1 < 0 || comm-1 > 3) return 1;
118:   if (!keyval) Keyval_setup();
119:   *flag                  = attr[comm-1][keyval].active;
120:   *(void**)attribute_val = attr[comm-1][keyval].attribute_val;
121:   return MPI_SUCCESS;
122: }
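
/*
     Sketch of retrieving the built-in MPI_TAG_UB attribute that Keyval_setup()
   installs under keyval 0 (added for illustration, not part of the original
   source; MPIUNI_ATTR_EXAMPLE is a hypothetical guard macro that is never
   defined).
*/
#if defined(MPIUNI_ATTR_EXAMPLE)
static void MPIUNI_Attr_example(void)
{
  int *tag_ub,flag;

  MPI_Attr_get(MPI_COMM_WORLD,0,&tag_ub,&flag); /* keyval 0 triggers Keyval_setup() on first use */
  if (flag) printf("MPI_TAG_UB = %d\n",*tag_ub); /* prints 100000000 */
}
#endif
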

124: static int dups[4] = {1,1,1,1};
125: int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
126: {
127:   if (comm-1 < 0 || comm-1 > 3) return 1;
128:   dups[comm-1]++;
129:   *newcomm =  comm;
130:   return MPI_SUCCESS;
131: }

133: int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
134: {
135:   if (comm-1 < 0 || comm-1 > 3) return 1;
136:   if (comm == MPI_COMM_WORLD || comm == MPI_COMM_SELF) *out = comm + 2;
137:   else {
138:     *out = comm;
139:     dups[comm-1]++;
140:   }
141:   return 0;
142: }

144: int MPI_Comm_free(MPI_Comm *comm)
145: {
146:   int i;

148:   if (*comm-1 < 0 || *comm-1 > 3) return 1;
149:   if (dups[*comm-1] == 1) {
150:     for (i=0; i<num_attr; i++) {
151:       if (attr[*comm-1][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[*comm-1][i].attribute_val,attr_keyval[i].extra_state);
152:       attr[*comm-1][i].active        = 0;
153:       attr[*comm-1][i].attribute_val = 0;
154:     }
155:     dups[*comm-1] = 1;
156:     *comm = 0;
157:   } else if (dups[*comm-1] > 1) dups[*comm-1]--;
158:   return MPI_SUCCESS;
159: }

161: int MPI_Comm_size(MPI_Comm comm, int *size)
162: {
163:   *size=1;
164:   return MPI_SUCCESS;
165: }

167: int MPI_Comm_rank(MPI_Comm comm, int *rank)
168: {
169:   *rank=0;
170:   return MPI_SUCCESS;
171: }

173: int MPI_Abort(MPI_Comm comm,int errorcode)
174: {
175:   abort();
176:   return MPI_SUCCESS;
177: }

179: /* --------------------------------------------------------------------------*/

181: static int MPI_was_initialized = 0;
182: static int MPI_was_finalized   = 0;

184: int MPI_Init(int *argc, char ***argv)
185: {
186:   if (MPI_was_initialized) return 1;
187:   if (MPI_was_finalized) return 1;
188:   MPI_was_initialized = 1;
189:   return 0;
190: }

192: int MPI_Finalize(void)
193: {
194:   MPI_Comm comm;
195:   if (MPI_was_finalized) return 1;
196:   if (!MPI_was_initialized) return 1;
197:   comm = MPI_COMM_WORLD;
198:   MPI_Comm_free(&comm);
199:   comm = MPI_COMM_SELF;
200:   MPI_Comm_free(&comm);
201:   MPI_was_finalized = 1;
202:   return 0;
203: }

205: int MPI_Initialized(int *flag)
206: {
207:   *flag = MPI_was_initialized;
208:   return 0;
209: }

211: int MPI_Finalized(int *flag)
212: {
213:   *flag = MPI_was_finalized;
214:   return 0;
215: }

217: /* -------------------     Fortran versions of several routines ------------------ */

219: #if defined(PETSC_HAVE_FORTRAN_CAPS)
220: #define mpi_init_             MPI_INIT
221: #define mpi_finalize_         MPI_FINALIZE
222: #define mpi_comm_size_        MPI_COMM_SIZE
223: #define mpi_comm_rank_        MPI_COMM_RANK
224: #define mpi_abort_            MPI_ABORT
225: #define mpi_reduce_           MPI_REDUCE
226: #define mpi_allreduce_        MPI_ALLREDUCE
227: #define mpi_barrier_          MPI_BARRIER
228: #define mpi_bcast_            MPI_BCAST
229: #define mpi_gather_           MPI_GATHER
230: #define mpi_allgather_        MPI_ALLGATHER
231: #define mpi_comm_split_       MPI_COMM_SPLIT
232: #define mpi_scan_             MPI_SCAN
233: #define mpi_send_             MPI_SEND
234: #define mpi_recv_             MPI_RECV
235: #define mpi_reduce_scatter_   MPI_REDUCE_SCATTER
236: #define mpi_irecv_            MPI_IRECV
237: #define mpi_isend_            MPI_ISEND
238: #define mpi_sendrecv_         MPI_SENDRECV
239: #define mpi_test_             MPI_TEST
240: #define mpi_waitall_          MPI_WAITALL
241: #define mpi_waitany_          MPI_WAITANY
242: #define mpi_allgatherv_       MPI_ALLGATHERV
243: #define mpi_alltoallv_        MPI_ALLTOALLV
244: #define mpi_comm_create_      MPI_COMM_CREATE
245: #define mpi_address_          MPI_ADDRESS
246: #define mpi_pack_             MPI_PACK
247: #define mpi_unpack_           MPI_UNPACK
248: #define mpi_pack_size_        MPI_PACK_SIZE
249: #define mpi_type_struct_      MPI_TYPE_STRUCT
250: #define mpi_type_commit_      MPI_TYPE_COMMIT
251: #define mpi_wtime_            MPI_WTIME
252: #define mpi_cancel_           MPI_CANCEL
253: #define mpi_comm_dup_         MPI_COMM_DUP
254: #define mpi_comm_free_        MPI_COMM_FREE
255: #define mpi_get_count_        MPI_GET_COUNT
256: #define mpi_get_processor_name_ MPI_GET_PROCESSOR_NAME
257: #define mpi_initialized_      MPI_INITIALIZED
258: #define mpi_iprobe_           MPI_IPROBE
259: #define mpi_probe_            MPI_PROBE
260: #define mpi_request_free_     MPI_REQUEST_FREE
261: #define mpi_ssend_            MPI_SSEND
262: #define mpi_wait_             MPI_WAIT
263: #define mpi_comm_group_       MPI_COMM_GROUP
264: #define mpi_exscan_           MPI_EXSCAN
265: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
266: #define mpi_init_             mpi_init
267: #define mpi_finalize_         mpi_finalize
268: #define mpi_comm_size_        mpi_comm_size
269: #define mpi_comm_rank_        mpi_comm_rank
270: #define mpi_abort_            mpi_abort
271: #define mpi_reduce_           mpi_reduce
272: #define mpi_allreduce_        mpi_allreduce
273: #define mpi_barrier_          mpi_barrier
274: #define mpi_bcast_            mpi_bcast
275: #define mpi_gather_           mpi_gather
276: #define mpi_allgather_        mpi_allgather
277: #define mpi_comm_split_       mpi_comm_split
278: #define mpi_scan_             mpi_scan
279: #define mpi_send_             mpi_send
280: #define mpi_recv_             mpi_recv
281: #define mpi_reduce_scatter_   mpi_reduce_scatter
282: #define mpi_irecv_            mpi_irecv
283: #define mpi_isend_            mpi_isend
284: #define mpi_sendrecv_         mpi_sendrecv
285: #define mpi_test_             mpi_test
286: #define mpi_waitall_          mpi_waitall
287: #define mpi_waitany_          mpi_waitany
288: #define mpi_allgatherv_       mpi_allgatherv
289: #define mpi_alltoallv_        mpi_alltoallv
290: #define mpi_comm_create_      mpi_comm_create
291: #define mpi_address_          mpi_address
292: #define mpi_pack_             mpi_pack
293: #define mpi_unpack_           mpi_unpack
294: #define mpi_pack_size_        mpi_pack_size
295: #define mpi_type_struct_      mpi_type_struct
296: #define mpi_type_commit_      mpi_type_commit
297: #define mpi_wtime_            mpi_wtime
298: #define mpi_cancel_           mpi_cancel
299: #define mpi_comm_dup_         mpi_comm_dup
300: #define mpi_comm_free_        mpi_comm_free
301: #define mpi_get_count_        mpi_get_count
302: #define mpi_get_processor_name_ mpi_get_processor_name
303: #define mpi_initialized_      mpi_initialized
304: #define mpi_iprobe_           mpi_iprobe
305: #define mpi_probe_            mpi_probe
306: #define mpi_request_free_     mpi_request_free
307: #define mpi_ssend_            mpi_ssend
308: #define mpi_wait_             mpi_wait
309: #define mpi_comm_group_       mpi_comm_group
310: #define mpi_exscan_           mpi_exscan
311: #endif

313: #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
314: #define mpi_init_             mpi_init__
315: #define mpi_finalize_         mpi_finalize__
316: #define mpi_comm_size_        mpi_comm_size__
317: #define mpi_comm_rank_        mpi_comm_rank__
318: #define mpi_abort_            mpi_abort__
319: #define mpi_reduce_           mpi_reduce__
320: #define mpi_allreduce_        mpi_allreduce__
321: #define mpi_barrier_          mpi_barrier__
322: #define mpi_bcast_            mpi_bcast__
323: #define mpi_gather_           mpi_gather__
324: #define mpi_allgather_        mpi_allgather__
325: #define mpi_comm_split_       mpi_comm_split__
326: #define mpi_scan_             mpi_scan__
327: #define mpi_send_             mpi_send__
328: #define mpi_recv_             mpi_recv__
329: #define mpi_reduce_scatter_   mpi_reduce_scatter__
330: #define mpi_irecv_            mpi_irecv__
331: #define mpi_isend_            mpi_isend__
332: #define mpi_sendrecv_         mpi_sendrecv__
333: #define mpi_test_             mpi_test__
334: #define mpi_waitall_          mpi_waitall__
335: #define mpi_waitany_          mpi_waitany__
336: #define mpi_allgatherv_       mpi_allgatherv__
337: #define mpi_alltoallv_        mpi_alltoallv__
338: #define mpi_comm_create_      mpi_comm_create__
339: #define mpi_address_          mpi_address__
340: #define mpi_pack_             mpi_pack__
341: #define mpi_unpack_           mpi_unpack__
342: #define mpi_pack_size_        mpi_pack_size__
343: #define mpi_type_struct_      mpi_type_struct__
344: #define mpi_type_commit_      mpi_type_commit__
345: #define mpi_wtime_            mpi_wtime__
346: #define mpi_cancel_           mpi_cancel__
347: #define mpi_comm_dup_         mpi_comm_dup__
348: #define mpi_comm_free_        mpi_comm_free__
349: #define mpi_get_count_        mpi_get_count__
350: #define mpi_get_processor_name_ mpi_get_processor_name__
351: #define mpi_initialized_      mpi_initialized__
352: #define mpi_iprobe_           mpi_iprobe__
353: #define mpi_probe_            mpi_probe__
354: #define mpi_request_free_     mpi_request_free__
355: #define mpi_ssend_            mpi_ssend__
356: #define mpi_wait_             mpi_wait__
357: #define mpi_comm_group_       mpi_comm_group__
358: #define mpi_exscan_           mpi_exscan__
359: #endif


362: /* Do not build the Fortran interface if MPI namespace collision is to be avoided */
363: #if !defined(MPIUNI_AVOID_MPI_NAMESPACE)

365: void PETSC_STDCALL  mpi_init_(int *ierr)
366: {
367:   *ierr = MPI_Init((int*)0, (char***)0);
368: }

370: void PETSC_STDCALL  mpi_finalize_(int *ierr)
371: {
372:   *ierr = MPI_Finalize();
373: }

375: void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
376: {
377:   *size = 1;
378:   *ierr = 0;
379: }

381: void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
382: {
383:   *rank=0;
384:   *ierr=MPI_SUCCESS;
385: }

387: void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
388: {
389:   *newcomm = *comm;
390:   *ierr    =MPI_SUCCESS;
391: }

393: void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
394: {
395:   abort();
396:   *ierr = MPI_SUCCESS;
397: }

399: void PETSC_STDCALL mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
400: {
401:   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
402:   *ierr = MPI_SUCCESS;
403: }

405: void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
406: {
407:   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
408:   *ierr = MPI_SUCCESS;
409: }

411: void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
412: {
413:   *ierr = MPI_SUCCESS;
414: }

416: void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
417: {
418:   *ierr = MPI_SUCCESS;
419: }


422: void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
423: {
424:   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
425:   *ierr = MPI_SUCCESS;
426: }

428: void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
429: {
430:   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
431:   *ierr = MPI_SUCCESS;
432: }

434: void PETSC_STDCALL mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
435: {
436:   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
437:   *ierr = MPI_SUCCESS;
438: }

440: void PETSC_STDCALL mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
441: {
442:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
443: }

445: void PETSC_STDCALL mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int status,int *ierr)
446: {
447:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
448: }

450: void PETSC_STDCALL mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
451: {
452:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
453: }

455: void PETSC_STDCALL mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
456: {
457:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
458: }

460: void PETSC_STDCALL mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
461: {
462:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
463: }

465: void PETSC_STDCALL mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
466: {
467:   MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPIUNI_DATASIZE[*sendtype]);
468:   *ierr = MPI_SUCCESS;
469: }

471: void PETSC_STDCALL mpi_test_(int *request,int *flag,int *status,int *ierr)
472: {
473:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
474: }

476: void PETSC_STDCALL mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
477: {
478:   *ierr = MPI_SUCCESS;
479: }

481: void PETSC_STDCALL mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
482: {
483:   *ierr = MPI_SUCCESS;
484: }

486: void PETSC_STDCALL mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
487: {
488:   MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPIUNI_DATASIZE[*sendtype]);
489:   *ierr = MPI_SUCCESS;
490: }

492: void PETSC_STDCALL mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
493: {
494:   MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcounts)*MPIUNI_DATASIZE[*sendtype]);
495:   *ierr = MPI_SUCCESS;
496: }

498: void PETSC_STDCALL mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
499: {
500:   *newcomm =  *comm;
501:   *ierr = MPI_SUCCESS;
502: }

504: void PETSC_STDCALL mpi_address_(void *location,MPIUNI_INTPTR *address,int *ierr)
505: {
506:   *address =  (MPIUNI_INTPTR) location;
507:   *ierr = MPI_SUCCESS;
508: }

510: void PETSC_STDCALL mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
511: {
512:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
513: }

515: void PETSC_STDCALL mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
516: {
517:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
518: }

520: void PETSC_STDCALL mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
521: {
522:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
523: }

525: void PETSC_STDCALL mpi_type_struct_(int *count,int *array_of_blocklengths,int * array_of_displaments,int *array_of_types,int *newtype,int *ierr)
526: {
527:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
528: }

530: void PETSC_STDCALL mpi_type_commit_(int *datatype,int *ierr)
531: {
532:   *ierr = MPI_SUCCESS;
533: }

535: double PETSC_STDCALL mpi_wtime_(void)
536: {
537:   return 0.0;
538: }

540: void PETSC_STDCALL mpi_cancel_(int *request,int *ierr)
541: {
542:   *ierr = MPI_SUCCESS;
543: }

545: void PETSC_STDCALL mpi_comm_dup_(int *comm,int *out,int *ierr)
546: {
547:   *out  = *comm;
548:   *ierr = MPI_SUCCESS;
549: }

551: void PETSC_STDCALL mpi_comm_free_(int *comm,int *ierr)
552: {
553:   *ierr = MPI_SUCCESS;
554: }

556: void PETSC_STDCALL mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
557: {
558:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
559: }

561: /* duplicate from fortranimpl.h */
562: #if defined(PETSC_HAVE_FORTRAN_MIXED_STR_ARG)
563: #define PETSC_MIXED_LEN(len) ,int len
564: #define PETSC_END_LEN(len)
565: #else
566: #define PETSC_MIXED_LEN(len)
567: #define PETSC_END_LEN(len)   ,int len
568: #endif

570: void PETSC_STDCALL mpi_get_processor_name_(char *name PETSC_MIXED_LEN(len),int *result_len,int *ierr PETSC_END_LEN(len))
571: {
572:   MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
573:   *result_len = 9;
574:   *ierr = MPI_SUCCESS;
575: }
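
/*
     For illustration (not part of the original source): with the hidden Fortran
   string length argument, the declaration of mpi_get_processor_name_() above
   expands in the two configurations as

       PETSC_HAVE_FORTRAN_MIXED_STR_ARG defined (length follows the string):
         void PETSC_STDCALL mpi_get_processor_name_(char *name,int len,int *result_len,int *ierr)

       otherwise (length appended after the argument list):
         void PETSC_STDCALL mpi_get_processor_name_(char *name,int *result_len,int *ierr,int len)
*/
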

577: void PETSC_STDCALL mpi_initialized_(int *flag,int *ierr)
578: {
579:   *flag = MPI_was_initialized;
580:   *ierr = MPI_SUCCESS;
581: }

583: void PETSC_STDCALL mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
584: {
585:   *ierr = MPI_SUCCESS;
586: }

588: void PETSC_STDCALL mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
589: {
590:   *ierr = MPI_SUCCESS;
591: }

593: void PETSC_STDCALL mpi_request_free_(int *request,int *ierr)
594: {
595:   *ierr = MPI_SUCCESS;
596: }

598: void PETSC_STDCALL mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
599: {
600:   *ierr = MPI_Abort(MPI_COMM_WORLD,0);
601: }

603: void PETSC_STDCALL mpi_wait_(int *request,int *status,int *ierr)
604: {
605:   *ierr = MPI_SUCCESS;
606: }

608: void PETSC_STDCALL mpi_comm_group_(int *comm,int *group,int *ierr)
609: {
610:   *ierr = MPI_SUCCESS;
611: }

613: void PETSC_STDCALL mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
614: {
615:   *ierr = MPI_SUCCESS;
616: }

618: #endif /* MPIUNI_AVOID_MPI_NAMESPACE */

620: #if defined(__cplusplus)
621: }
622: #endif