/*  mprint.c  */
/*
      Utility routines to add simple ASCII I/O capability.
*/
#include "src/sys/src/fileio/mprint.h"
/*
     If petsc_history is on, then all Petsc*Printf() results are saved
     in the appropriate (usually .petschistory) file.
*/
/*
   PetscFormatConvert - Converts a PETSc format string into a standard C format string,
   replacing each %D (PetscInt) specifier with %d or %lld depending on whether PETSc
   was configured with 32- or 64-bit integers.
*/
PetscErrorCode PetscFormatConvert(const char *format,char *newformat)
{
  PetscInt i = 0,j = 0;

  while (format[i] && i < 8*1024-1) {
    if (format[i] == '%' && format[i+1] == 'D') {
      newformat[j++] = '%';
#if defined(PETSC_USE_32BIT_INT)
      newformat[j++] = 'd';
#else
      newformat[j++] = 'l';
      newformat[j++] = 'l';
      newformat[j++] = 'd';
#endif
      i += 2;
    } else if (format[i] == '%' && format[i+1] >= '1' && format[i+1] <= '9' && format[i+2] == 'D') {
      newformat[j++] = '%';
      newformat[j++] = format[i+1];
#if defined(PETSC_USE_32BIT_INT)
      newformat[j++] = 'd';
#else
      newformat[j++] = 'l';
      newformat[j++] = 'l';
      newformat[j++] = 'd';
#endif
      i += 3;
    } else {
      newformat[j++] = format[i++];
    }
  }
  newformat[j] = 0;
  return 0;
}
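/*
   Illustrative sketch (not part of the original source): how a caller might use
   PetscFormatConvert() to expand the PETSc-specific %D specifier before handing the
   string to the C library.  With 64-bit PetscInt the "%D" below becomes "%lld".

     char     newformat[8*1024];
     PetscInt n = 42;
     PetscFormatConvert("value of n is %D\n",newformat);
     printf(newformat,n);
*/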

/*
   No error handling because this may be called by an error handler
*/
PetscErrorCode PetscVSNPrintf(char *str,size_t len,const char *format,va_list Argp)
{
  /* no malloc since this may be called by an error handler */
  char newformat[8*1024];

  PetscFormatConvert(format,newformat);
  /* note: len is not enforced here; the converted format is passed directly to vsprintf() */
#if defined(PETSC_HAVE_VPRINTF_CHAR)
  vsprintf(str,newformat,(char *)Argp);
#else
  vsprintf(str,newformat,Argp);
#endif
  return 0;
}
/*
   No error handling because this may be called by an error handler
*/
PetscErrorCode PetscVFPrintf(FILE *fd,const char *format,va_list Argp)
{
  /* no malloc since this may be called by an error handler */
  char newformat[8*1024];

  PetscFormatConvert(format,newformat);
#if defined(PETSC_HAVE_VPRINTF_CHAR)
  vfprintf(fd,newformat,(char *)Argp);
#else
  vfprintf(fd,newformat,Argp);
#endif
  return 0;
}
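/*
   Illustrative sketch (not part of the original source): PetscVFPrintf() is the
   building block used by the variadic routines in this file.  A hypothetical
   wrapper, here called MyPrintf(), would forward its va_list in the same way:

     PetscErrorCode MyPrintf(FILE *fd,const char format[],...)
     {
       va_list Argp;
       va_start(Argp,format);
       PetscVFPrintf(fd,format,Argp);
       va_end(Argp);
       return 0;
     }
*/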
/* ----------------------------------------------------------------------- */

/*
   Queue of output generated by PetscSynchronizedPrintf()/PetscSynchronizedFPrintf()
   on the processors with nonzero rank; it is drained by PetscSynchronizedFlush().
*/
PrintfQueue queue       = 0,queuebase = 0;
int         queuelength = 0;
FILE        *queuefile  = PETSC_NULL;
/*@C
    PetscSynchronizedPrintf - Prints synchronized output from several processors.
    Output of the first processor is followed by that of the second, etc.

    Not Collective

    Input Parameters:
+   comm - the communicator
-   format - the usual printf() format string

    Level: intermediate

    Notes:
    REQUIRES an intervening call to PetscSynchronizedFlush() for the information
    from all the processors to be printed.

    The length of the formatted message cannot exceed QUEUESTRINGSIZE characters.

    Fortran Note:
    The call sequence is PetscSynchronizedPrintf(MPI_Comm, character(*), PetscErrorCode ierr) from Fortran.
    That is, you can only pass a single character string from Fortran.

.seealso: PetscSynchronizedFlush(), PetscSynchronizedFPrintf(), PetscFPrintf(),
          PetscPrintf(), PetscViewerASCIIPrintf(), PetscViewerASCIISynchronizedPrintf()
@*/
PetscErrorCode PetscSynchronizedPrintf(MPI_Comm comm,const char format[],...)
{
  PetscMPIInt rank;

  MPI_Comm_rank(comm,&rank);

  /* First processor prints immediately to stdout */
  if (!rank) {
    va_list Argp;
    va_start(Argp,format);
    PetscVFPrintf(stdout,format,Argp);
    fflush(stdout);
    if (petsc_history) {
      PetscVFPrintf(petsc_history,format,Argp);
      fflush(petsc_history);
    }
    va_end(Argp);
  } else { /* other processors add to local queue */
    va_list     Argp;
    PrintfQueue next;

    PetscNew(struct _PrintfQueue,&next);
    if (queue) {queue->next = next; queue = next; queue->next = 0;}
    else       {queuebase = queue = next;}
    queuelength++;
    va_start(Argp,format);
    PetscMemzero(next->string,QUEUESTRINGSIZE);
    PetscVSNPrintf(next->string,QUEUESTRINGSIZE,format,Argp);
    va_end(Argp);
  }

  return(0);
}
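/*
   Illustrative sketch (not part of the original source): typical usage of
   PetscSynchronizedPrintf() followed by PetscSynchronizedFlush(), so that the
   output of rank 0 appears first, then rank 1, and so on.

     PetscMPIInt rank;
     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Greetings from process %d\n",rank);
     PetscSynchronizedFlush(PETSC_COMM_WORLD);
*/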

/*@C
    PetscSynchronizedFPrintf - Prints synchronized output to the specified file from
    several processors.  Output of the first processor is followed by that of the
    second, etc.

    Not Collective

    Input Parameters:
+   comm - the communicator
.   fp - the file pointer
-   format - the usual printf() format string

    Level: intermediate

    Notes:
    REQUIRES an intervening call to PetscSynchronizedFlush() for the information
    from all the processors to be printed.

    The length of the formatted message cannot exceed QUEUESTRINGSIZE characters.

    Contributed by: Matthew Knepley

.seealso: PetscSynchronizedPrintf(), PetscSynchronizedFlush(), PetscFPrintf(),
          PetscFOpen(), PetscViewerASCIISynchronizedPrintf(), PetscViewerASCIIPrintf()
@*/
PetscErrorCode PetscSynchronizedFPrintf(MPI_Comm comm,FILE* fp,const char format[],...)
{
  PetscMPIInt rank;

  MPI_Comm_rank(comm,&rank);

  /* First processor prints immediately to fp */
  if (!rank) {
    va_list Argp;
    va_start(Argp,format);
    PetscVFPrintf(fp,format,Argp);
    fflush(fp);
    queuefile = fp;
    if (petsc_history) {
      PetscVFPrintf(petsc_history,format,Argp);
      fflush(petsc_history);
    }
    va_end(Argp);
  } else { /* other processors add to local queue */
    va_list     Argp;
    PrintfQueue next;
    PetscNew(struct _PrintfQueue,&next);
    if (queue) {queue->next = next; queue = next; queue->next = 0;}
    else       {queuebase = queue = next;}
    queuelength++;
    va_start(Argp,format);
    PetscMemzero(next->string,QUEUESTRINGSIZE);
    PetscVSNPrintf(next->string,QUEUESTRINGSIZE,format,Argp);
    va_end(Argp);
  }
  return(0);
}
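/*
   Illustrative sketch (not part of the original source): writing rank-ordered output
   to a shared file, using PetscFOpen()/PetscFClose() so that only the first process
   of the communicator actually opens the file.  The file name "output.txt" is an
   arbitrary placeholder.

     FILE        *fp;
     PetscMPIInt rank;
     MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
     PetscFOpen(PETSC_COMM_WORLD,"output.txt","w",&fp);
     PetscSynchronizedFPrintf(PETSC_COMM_WORLD,fp,"Data from process %d\n",rank);
     PetscSynchronizedFlush(PETSC_COMM_WORLD);
     PetscFClose(PETSC_COMM_WORLD,fp);
*/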
/*@C
    PetscSynchronizedFlush - Flushes to the screen output from all processors
    involved in previous PetscSynchronizedPrintf() calls.

    Collective on MPI_Comm

    Input Parameter:
.   comm - the communicator

    Level: intermediate

    Notes:
    Usage of PetscSynchronizedPrintf() and PetscSynchronizedFPrintf() with
    different MPI communicators REQUIRES an intervening call to PetscSynchronizedFlush().

.seealso: PetscSynchronizedPrintf(), PetscFPrintf(), PetscPrintf(), PetscViewerASCIIPrintf(),
          PetscViewerASCIISynchronizedPrintf()
@*/
PetscErrorCode PetscSynchronizedFlush(MPI_Comm comm)
{
  PetscMPIInt rank,size,tag,i,j,n;
  char        message[QUEUESTRINGSIZE];
  MPI_Status  status;
  FILE        *fd;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  PetscCommGetNewTag(comm,&tag);
  /* First processor waits for messages from all other processors */
  if (!rank) {
    if (queuefile) {
      fd = queuefile;
    } else {
      fd = stdout;
    }
    for (i=1; i<size; i++) {
      MPI_Recv(&n,1,MPI_INT,i,tag,comm,&status);
      for (j=0; j<n; j++) {
        MPI_Recv(message,QUEUESTRINGSIZE,MPI_CHAR,i,tag,comm,&status);
        fprintf(fd,"%s",message);
        if (petsc_history) {
          fprintf(petsc_history,"%s",message);
        }
      }
    }
    fflush(fd);
    if (petsc_history) fflush(petsc_history);
    queuefile = PETSC_NULL;
  } else { /* other processors send queue to processor 0 */
    PrintfQueue next = queuebase,previous;

    MPI_Send(&queuelength,1,MPI_INT,0,tag,comm);
    for (i=0; i<queuelength; i++) {
      MPI_Send(next->string,QUEUESTRINGSIZE,MPI_CHAR,0,tag,comm);
      previous = next;
      next     = next->next;
      PetscFree(previous);
    }
    queue       = 0;
    queuelength = 0;
  }
  return(0);
}
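/*
   Illustrative sketch (not part of the original source): as the note above says,
   queued output on one communicator must be flushed before the synchronized routines
   are used with a different communicator.  Here comm1 and comm2 stand for two
   previously created communicators.

     PetscSynchronizedPrintf(comm1,"messages ordered within comm1\n");
     PetscSynchronizedFlush(comm1);
     PetscSynchronizedPrintf(comm2,"messages ordered within comm2\n");
     PetscSynchronizedFlush(comm2);
*/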
/* ---------------------------------------------------------------------------------------*/

/*@C
    PetscFPrintf - Prints to a file, only from the first
    processor in the communicator.

    Not Collective

    Input Parameters:
+   comm - the communicator
.   fd - the file pointer
-   format - the usual printf() format string

    Level: intermediate

    Fortran Note:
    This routine is not supported in Fortran.

    Concepts: printing^in parallel
    Concepts: printf^in parallel

.seealso: PetscPrintf(), PetscSynchronizedPrintf(), PetscViewerASCIIPrintf(),
          PetscViewerASCIISynchronizedPrintf(), PetscSynchronizedFlush()
@*/
PetscErrorCode PetscFPrintf(MPI_Comm comm,FILE* fd,const char format[],...)
{
  PetscMPIInt rank;

  MPI_Comm_rank(comm,&rank);
  if (!rank) {
    va_list Argp;
    va_start(Argp,format);
    PetscVFPrintf(fd,format,Argp);
    fflush(fd);
    if (petsc_history) {
      PetscVFPrintf(petsc_history,format,Argp);
      fflush(petsc_history);
    }
    va_end(Argp);
  }
  return(0);
}
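/*
   Illustrative sketch (not part of the original source): only the first process of
   the communicator writes, so the message appears exactly once even when the program
   runs on many processes.

     PetscFPrintf(PETSC_COMM_WORLD,stderr,"Warning: falling back to the default tolerance\n");
*/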
/*@C
    PetscPrintf - Prints to standard out, only from the first
    processor in the communicator.

    Not Collective

    Input Parameters:
+   comm - the communicator
-   format - the usual printf() format string

    Level: intermediate

    Fortran Note:
    The call sequence is PetscPrintf(MPI_Comm, character(*), PetscErrorCode ierr) from Fortran.
    That is, you can only pass a single character string from Fortran.

    Notes:
    %A in the format string is replaced with %g, unless the corresponding value is
    < 1.e-12, in which case the text "< 1.e-12" is printed in its place.

    Concepts: printing^in parallel
    Concepts: printf^in parallel

.seealso: PetscFPrintf(), PetscSynchronizedPrintf()
@*/
PetscErrorCode PetscPrintf(MPI_Comm comm,const char format[],...)
{
  PetscMPIInt rank;
  size_t      len;
  char        *nformat,*sub1,*sub2;
  PetscReal   value;

  if (!comm) comm = PETSC_COMM_WORLD;
  MPI_Comm_rank(comm,&rank);
  if (!rank) {
    va_list Argp;
    va_start(Argp,format);

    PetscStrstr(format,"%A",&sub1);
    if (sub1) {
      PetscStrstr(format,"%",&sub2);
      if (sub1 != sub2) SETERRQ(PETSC_ERR_ARG_WRONG,"%%A format must be first in format string");
      PetscStrlen(format,&len);
      PetscMalloc((len+16)*sizeof(char),&nformat);
      PetscStrcpy(nformat,format);
      PetscStrstr(nformat,"%",&sub2);
      sub2[0] = 0;
      value   = (double)va_arg(Argp,double);
      if (PetscAbsReal(value) < 1.e-12) {
        PetscStrcat(nformat,"< 1.e-12");
      } else {
        PetscStrcat(nformat,"%g");
        va_end(Argp);
        va_start(Argp,format);
      }
      PetscStrcat(nformat,sub1+2);
    } else {
      nformat = (char*)format;
    }
    PetscVFPrintf(stdout,nformat,Argp);
    fflush(stdout);
    if (petsc_history) {
      PetscVFPrintf(petsc_history,nformat,Argp);
      fflush(petsc_history);
    }
    va_end(Argp);
    if (sub1) {PetscFree(nformat);}
  }
  return(0);
}
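/*
   Illustrative sketch (not part of the original source): PetscPrintf() prints only on
   the first process; the %A specifier below is expanded as described above, so a very
   small residual is reported as "< 1.e-12" rather than as a tiny number.

     PetscReal rnorm = 3.2e-14;
     PetscPrintf(PETSC_COMM_WORLD,"%A residual norm after 10 iterations\n",rnorm);
*/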
/* ---------------------------------------------------------------------------------------*/

/*
   PetscHelpPrintfDefault - Default routine used to print help messages; prints only
   on the first processor of the communicator (and to petsc_history if it is enabled).
*/
PetscErrorCode PetscHelpPrintfDefault(MPI_Comm comm,const char format[],...)
{
  PetscMPIInt rank;

  if (!comm) comm = PETSC_COMM_WORLD;
  MPI_Comm_rank(comm,&rank);
  if (!rank) {
    va_list Argp;
    va_start(Argp,format);
    PetscVFPrintf(stdout,format,Argp);
    fflush(stdout);
    if (petsc_history) {
      PetscVFPrintf(petsc_history,format,Argp);
      fflush(petsc_history);
    }
    va_end(Argp);
  }
  return(0);
}
/* ---------------------------------------------------------------------------------------*/

static char       arch[10],hostname[64],username[16],pname[PETSC_MAX_PATH_LEN],date[64];
static PetscTruth PetscErrorPrintfInitializeCalled = PETSC_FALSE;

/*
   Initializes arch, hostname, username, and date so that system calls do NOT need
   to be made during the error handler.
*/
PetscErrorCode PetscErrorPrintfInitialize()
{
  PetscGetArchType(arch,10);
  PetscGetHostName(hostname,64);
  PetscGetUserName(username,16);
  PetscGetProgramName(pname,PETSC_MAX_PATH_LEN);
  PetscGetDate(date,64);
  PetscErrorPrintfInitializeCalled = PETSC_TRUE;
  return(0);
}
PetscErrorCode PetscErrorPrintfDefault(const char format[],...)
{
  va_list           Argp;
  static PetscTruth PetscErrorPrintfCalled    = PETSC_FALSE;
  static PetscTruth InPetscErrorPrintfDefault = PETSC_FALSE;
  static FILE       *fd;
  char              version[256];

  /*
      InPetscErrorPrintfDefault is used to prevent an error handler that is (potentially)
      triggered from PetscSleep(), PetscGetArchName(), ... below from printing its own
      error message.
  */

  /*
      This function does not do error checking because it is called by the error handlers
      and may be called by PetscStackView().
  */

  if (!PetscErrorPrintfCalled) {
    PetscTruth use_stderr;

    PetscErrorPrintfCalled    = PETSC_TRUE;
    InPetscErrorPrintfDefault = PETSC_TRUE;

    PetscOptionsHasName(PETSC_NULL,"-error_output_stderr",&use_stderr);
    if (use_stderr) {
      fd = stderr;
    } else {
      fd = stdout;
    }

    /*
        On the SGI machines and Cray T3E, if errors are generated "simultaneously" by
        different processors, the messages are printed all jumbled up; to try to
        prevent this we have each processor wait based on its rank
    */
#if defined(PETSC_CAN_SLEEP_AFTER_ERROR)
    {
      PetscMPIInt rank;
      if (PetscGlobalRank > 8) rank = 8; else rank = PetscGlobalRank;
      PetscSleep(rank);
    }
#endif

    PetscGetVersion(&version);
    fprintf(fd,"--------------------------------------------\
------------------------------\n");
    fprintf(fd,"%s\n",version);
    fprintf(fd,"See docs/changes/index.html for recent updates.\n");
    fprintf(fd,"See docs/troubleshooting.html for hints about trouble shooting.\n");
    fprintf(fd,"See docs/index.html for manual pages.\n");
    fprintf(fd,"--------------------------------------------\
---------------------------\n");
    if (PetscErrorPrintfInitializeCalled) {
      fprintf(fd,"%s on a %s named %s by %s %s\n",pname,arch,hostname,username,date);
    }
    fprintf(fd,"Libraries linked from %s\n",PETSC_LIB_DIR);
    fprintf(fd,"--------------------------------------------\
---------------------------\n");
    fflush(fd);
    InPetscErrorPrintfDefault = PETSC_FALSE;
  }

  if (!InPetscErrorPrintfDefault) {
    va_start(Argp,format);
    fprintf(fd,"[%d]PETSC ERROR: ",PetscGlobalRank);
    PetscVFPrintf(fd,format,Argp);
    fflush(fd);
    va_end(Argp);
  }
  return 0;
}
/*@C
    PetscSynchronizedFGets - Several processors all get the same line from a file.

    Collective on MPI_Comm

    Input Parameters:
+   comm - the communicator
.   fp - the file pointer
-   len - the length of the output buffer

    Output Parameter:
.   string - the line read from the file

    Level: intermediate

.seealso: PetscSynchronizedPrintf(), PetscSynchronizedFlush(),
          PetscFOpen(), PetscViewerASCIISynchronizedPrintf(), PetscViewerASCIIPrintf()
@*/
PetscErrorCode PetscSynchronizedFGets(MPI_Comm comm,FILE* fp,size_t len,char string[])
{
  PetscMPIInt rank;

  MPI_Comm_rank(comm,&rank);

  /* First processor reads the line from fp */
  if (!rank) {
    fgets(string,len,fp);
  }
  /* then broadcasts it to all the other processors */
  MPI_Bcast(string,len,MPI_BYTE,0,comm);
  return(0);
}
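/*
   Illustrative sketch (not part of the original source): every process obtains the
   same line because rank 0 reads it and the result is broadcast to the others.  The
   file name "input.txt" is an arbitrary placeholder.

     FILE *fp;
     char line[256];
     PetscFOpen(PETSC_COMM_WORLD,"input.txt","r",&fp);
     PetscSynchronizedFGets(PETSC_COMM_WORLD,fp,256,line);
     PetscFClose(PETSC_COMM_WORLD,fp);
*/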