Actual source code: fretrieve.c
petsc-3.4.5 2014-06-29
2: /*
3: Code for opening and closing files.
4: */
5: #include <petscsys.h>
6: #if defined(PETSC_HAVE_PWD_H)
7: #include <pwd.h>
8: #endif
9: #include <ctype.h>
10: #include <sys/stat.h>
11: #if defined(PETSC_HAVE_UNISTD_H)
12: #include <unistd.h>
13: #endif
14: #if defined(PETSC_HAVE_SYS_UTSNAME_H)
15: #include <sys/utsname.h>
16: #endif
17: #include <fcntl.h>
18: #include <time.h>
19: #if defined(PETSC_HAVE_SYS_SYSTEMINFO_H)
20: #include <sys/systeminfo.h>
21: #endif
25: /*
26: Private routine to delete tmp/shared storage
28: This is called by MPI, not by users.
30: Note: this is declared extern "C" because it is passed to MPI_Keyval_create()
32: */
33: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelTmpShared(MPI_Comm comm,PetscMPIInt keyval,void *count_val,void *extra_state)
34: {
35: PetscErrorCode ierr;
37: PetscFunctionBegin;
38: ierr = PetscInfo1(0,"Deleting tmp/shared data in an MPI_Comm %ld\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
39: ierr = PetscFree(count_val);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
40: PetscFunctionReturn(MPI_SUCCESS);
41: }
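/*
   Illustrative sketch (not part of the original fretrieve.c): how a delete
   callback such as Petsc_DelTmpShared() is registered.  MPI_Keyval_create()
   records the callback; MPI later invokes it when the attribute is deleted or
   the communicator is freed, which is when the malloc'ed value is released.
   The keyval and function names below are hypothetical.
*/
PetscErrorCode ExampleCacheValue(MPI_Comm comm,PetscMPIInt value)
{
  PetscErrorCode     ierr;
  PetscMPIInt        *vp;
  static PetscMPIInt example_keyval = MPI_KEYVAL_INVALID;

  PetscFunctionBegin;
  if (example_keyval == MPI_KEYVAL_INVALID) {
    /* register Petsc_DelTmpShared() as the delete callback for this keyval */
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&example_keyval,0);CHKERRQ(ierr);
  }
  ierr = PetscMalloc(sizeof(PetscMPIInt),&vp);CHKERRQ(ierr);
  *vp  = value;
  /* Petsc_DelTmpShared() will free vp when comm is destroyed */
  ierr = MPI_Attr_put(comm,example_keyval,vp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}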
45: /*@C
46: PetscGetTmp - Gets the name of the tmp directory
48: Collective on MPI_Comm
50: Input Parameters:
51: + comm - MPI_Communicator that may share /tmp
52: - len - length of string to hold name
54: Output Parameters:
55: . dir - directory name
57: Options Database Keys:
58: + -shared_tmp
59: . -not_shared_tmp
60: - -tmp tmpdir
62: Environmental Variables:
63: + PETSC_SHARED_TMP
64: . PETSC_NOT_SHARED_TMP
65: - PETSC_TMP
67: Level: developer
70: If the environmental variable PETSC_TMP is set it will use this directory
71: as the "/tmp" directory.
73: @*/
74: PetscErrorCode PetscGetTmp(MPI_Comm comm,char dir[],size_t len)
75: {
77: PetscBool flg;
80: PetscOptionsGetenv(comm,"PETSC_TMP",dir,len,&flg);
81: if (!flg) {
82: PetscStrncpy(dir,"/tmp",len);
83: }
84: return(0);
85: }
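/*
   Illustrative usage sketch (not part of the original fretrieve.c): obtain the
   tmp directory and build a scratch-file path in it.  The file name
   "petsc_scratch" and the routine name are hypothetical.
*/
PetscErrorCode ExampleUseTmp(MPI_Comm comm)
{
  PetscErrorCode ierr;
  char           tmp[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  ierr = PetscGetTmp(comm,tmp,sizeof(tmp));CHKERRQ(ierr);   /* "/tmp" unless PETSC_TMP overrides it */
  ierr = PetscStrcat(tmp,"/petsc_scratch");CHKERRQ(ierr);
  ierr = PetscInfo1(NULL,"Scratch file would be %s\n",tmp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}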
89: /*@C
90: PetscSharedTmp - Determines if all processors in a communicator share a
91: /tmp or have different ones.
93: Collective on MPI_Comm
95: Input Parameters:
96: . comm - MPI_Communicator that may share /tmp
98: Output Parameters:
99: . shared - PETSC_TRUE or PETSC_FALSE
101: Options Database Keys:
102: + -shared_tmp
103: . -not_shared_tmp
104: - -tmp tmpdir
106: Environmental Variables:
107: + PETSC_SHARED_TMP
108: . PETSC_NOT_SHARED_TMP
109: - PETSC_TMP
111: Level: developer
113: Notes:
114: Stores the status as an MPI attribute so it does not have
115: to be redetermined each time.
117: Assumes that all processors in a communicator either
118: 1) have a common /tmp or
119: 2) each has a separate /tmp.
120: Eventually we can write a fancier one that determines which processors
121: share a common /tmp.
123: This will be very slow on runs with a large number of processors since
124: it requires O(p*p) file opens.
126: If the environmental variable PETSC_TMP is set it will use this directory
127: as the "/tmp" directory.
129: @*/
130: PetscErrorCode PetscSharedTmp(MPI_Comm comm,PetscBool *shared)
131: {
132: PetscErrorCode ierr;
133: PetscMPIInt size,rank,*tagvalp,sum,cnt,i;
134: PetscBool flg,iflg;
135: FILE *fd;
136: static PetscMPIInt Petsc_Tmp_keyval = MPI_KEYVAL_INVALID;
137: int err;
140: MPI_Comm_size(comm,&size);
141: if (size == 1) {
142: *shared = PETSC_TRUE;
143: return(0);
144: }
146: PetscOptionsGetenv(comm,"PETSC_SHARED_TMP",NULL,0,&flg);
147: if (flg) {
148: *shared = PETSC_TRUE;
149: return(0);
150: }
152: PetscOptionsGetenv(comm,"PETSC_NOT_SHARED_TMP",NULL,0,&flg);
153: if (flg) {
154: *shared = PETSC_FALSE;
155: return(0);
156: }
158: if (Petsc_Tmp_keyval == MPI_KEYVAL_INVALID) {
159: MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Petsc_Tmp_keyval,0);
160: }
162: MPI_Attr_get(comm,Petsc_Tmp_keyval,(void**)&tagvalp,(int*)&iflg);
163: if (!iflg) {
164: char filename[PETSC_MAX_PATH_LEN],tmpname[PETSC_MAX_PATH_LEN];
166: /* This communicator does not yet have a shared tmp attribute */
167: PetscMalloc(sizeof(PetscMPIInt),&tagvalp);
168: MPI_Attr_put(comm,Petsc_Tmp_keyval,tagvalp);
170: PetscOptionsGetenv(comm,"PETSC_TMP",tmpname,238,&iflg);
171: if (!iflg) {
172: PetscStrcpy(filename,"/tmp");
173: } else {
174: PetscStrcpy(filename,tmpname);
175: }
177: PetscStrcat(filename,"/petsctestshared");
178: MPI_Comm_rank(comm,&rank);
180: /* each processor creates a /tmp file and all the later ones check */
181: /* this makes sure no subset of processors is shared */
182: *shared = PETSC_FALSE;
183: for (i=0; i<size-1; i++) {
184: if (rank == i) {
185: fd = fopen(filename,"w");
186: if (!fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to open test file %s",filename);
187: err = fclose(fd);
188: if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
189: }
190: MPI_Barrier(comm);
191: if (rank >= i) {
192: fd = fopen(filename,"r");
193: if (fd) cnt = 1;
194: else cnt = 0;
195: if (fd) {
196: err = fclose(fd);
197: if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
198: }
199: } else cnt = 0;
201: MPI_Allreduce(&cnt,&sum,1,MPI_INT,MPI_SUM,comm);
202: if (rank == i) unlink(filename);
204: if (sum == size) {
205: *shared = PETSC_TRUE;
206: break;
207: } else if (sum != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Subset of processes share /tmp ");
208: }
209: *tagvalp = (int)*shared;
210: PetscInfo2(0,"processors %s %s\n",(*shared) ? "share":"do NOT share",(iflg ? tmpname:"/tmp"));
211: } else *shared = (PetscBool) *tagvalp;
212: return(0);
213: }
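/*
   Illustrative usage sketch (not part of the original fretrieve.c): when /tmp
   is shared only one rank needs to create a scratch file; otherwise every rank
   creates its own.  The scratch-file name is hypothetical.
*/
PetscErrorCode ExampleScratchInTmp(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscBool      shared;
  FILE           *fp;

  PetscFunctionBegin;
  ierr = PetscSharedTmp(comm,&shared);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank || !shared) {
    /* rank 0 writes for everyone on a shared /tmp; every rank writes otherwise */
    fp = fopen("/tmp/petsc_example_scratch","w");
    if (!fp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to create scratch file");
    if (fclose(fp)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
  }
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);   /* ensure the file exists before anyone reads it */
  PetscFunctionReturn(0);
}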
217: /*@C
218: PetscSharedWorkingDirectory - Determines if all processors in a communicator share a
219: working directory or have different ones.
221: Collective on MPI_Comm
223: Input Parameters:
224: . comm - MPI_Communicator that may share working directory
226: Output Parameters:
227: . shared - PETSC_TRUE or PETSC_FALSE
229: Options Database Keys:
230: + -shared_working_directory
231: - -not_shared_working_directory
233: Environmental Variables:
234: + PETSC_SHARED_WORKING_DIRECTORY
235: - PETSC_NOT_SHARED_WORKING_DIRECTORY
237: Level: developer
239: Notes:
240: Stores the status as an MPI attribute so it does not have
241: to be redetermined each time.
243: Assumes that all processors in a communicator either
244: 1) have a common working directory or
245: 2) each has a separate working directory.
246: Eventually we can write a fancier one that determines which processors
247: share a common working directory.
249: This will be very slow on runs with a large number of processors since
250: it requires O(p*p) file opens.
252: @*/
253: PetscErrorCode PetscSharedWorkingDirectory(MPI_Comm comm,PetscBool *shared)
254: {
255: PetscErrorCode ierr;
256: PetscMPIInt size,rank,*tagvalp,sum,cnt,i;
257: PetscBool flg,iflg;
258: FILE *fd;
259: static PetscMPIInt Petsc_WD_keyval = MPI_KEYVAL_INVALID;
260: int err;
263: MPI_Comm_size(comm,&size);
264: if (size == 1) {
265: *shared = PETSC_TRUE;
266: return(0);
267: }
269: PetscOptionsGetenv(comm,"PETSC_SHARED_WORKING_DIRECTORY",NULL,0,&flg);
270: if (flg) {
271: *shared = PETSC_TRUE;
272: return(0);
273: }
275: PetscOptionsGetenv(comm,"PETSC_NOT_SHARED_WORKING_DIRECTORY",NULL,0,&flg);
276: if (flg) {
277: *shared = PETSC_FALSE;
278: return(0);
279: }
281: if (Petsc_WD_keyval == MPI_KEYVAL_INVALID) {
282: MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Petsc_WD_keyval,0);
283: }
285: MPI_Attr_get(comm,Petsc_WD_keyval,(void**)&tagvalp,(int*)&iflg);
286: if (!iflg) {
287: char filename[PETSC_MAX_PATH_LEN];
289: /* This communicator does not yet have a shared attribute */
290: PetscMalloc(sizeof(PetscMPIInt),&tagvalp);
291: MPI_Attr_put(comm,Petsc_WD_keyval,tagvalp);
293: PetscGetWorkingDirectory(filename,240);
294: PetscStrcat(filename,"/petsctestshared");
295: MPI_Comm_rank(comm,&rank);
297: /* each processor creates a file and all the later ones check */
298: /* this makes sure no subset of processors is shared */
299: *shared = PETSC_FALSE;
300: for (i=0; i<size-1; i++) {
301: if (rank == i) {
302: fd = fopen(filename,"w");
303: if (!fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to open test file %s",filename);
304: err = fclose(fd);
305: if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
306: }
307: MPI_Barrier(comm);
308: if (rank >= i) {
309: fd = fopen(filename,"r");
310: if (fd) cnt = 1;
311: else cnt = 0;
312: if (fd) {
313: err = fclose(fd);
314: if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
315: }
316: } else cnt = 0;
318: MPI_Allreduce(&cnt,&sum,1,MPI_INT,MPI_SUM,comm);
319: if (rank == i) unlink(filename);
321: if (sum == size) {
322: *shared = PETSC_TRUE;
323: break;
324: } else if (sum != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Subset of processes share working directory");
325: }
326: *tagvalp = (int)*shared;
327: } else *shared = (PetscBool) *tagvalp;
328: PetscInfo1(0,"processors %s working directory\n",(*shared) ? "share" : "do NOT share");
329: return(0);
330: }
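/*
   Illustrative usage sketch (not part of the original fretrieve.c): choose a
   per-rank output file name only when the working directory is shared, so
   ranks do not overwrite each other.  The base name "output.log" is
   hypothetical.
*/
PetscErrorCode ExampleOutputName(MPI_Comm comm,char name[],size_t len)
{
  PetscErrorCode ierr;
  PetscBool      shared;
  PetscMPIInt    rank;

  PetscFunctionBegin;
  ierr = PetscSharedWorkingDirectory(comm,&shared);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (shared) {
    ierr = PetscSNPrintf(name,len,"output.%d.log",rank);CHKERRQ(ierr);   /* distinct name per rank */
  } else {
    ierr = PetscStrncpy(name,"output.log",len);CHKERRQ(ierr);            /* private directory, same name is safe */
  }
  PetscFunctionReturn(0);
}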
335: /*@C
336: PetscFileRetrieve - Obtains a library from a URL or a compressed file
337: and copies it into local disk space, uncompressed.
339: Collective on MPI_Comm
341: Input Parameters:
342: + comm - processors accessing the library
343: . libname - name of library, including entire URL (with or without .gz)
344: - llen - length of llibname
346: Output Parameters:
347: + llibname - name of local copy of library
348: - found - PETSC_TRUE if the file was found and retrieved, PETSC_FALSE otherwise
350: Level: developer
352: @*/
353: PetscErrorCode PetscFileRetrieve(MPI_Comm comm,const char libname[],char llibname[],size_t llen,PetscBool *found)
354: {
355: char buf[1024],tmpdir[PETSC_MAX_PATH_LEN],urlget[PETSC_MAX_PATH_LEN],*par;
356: const char *pdir;
357: FILE *fp;
359: int i;
360: PetscMPIInt rank;
361: size_t len = 0;
362: PetscBool flg1,flg2,flg3,sharedtmp,exists;
363: #if defined(PETSC_HAVE_POPEN)
364: PetscInt rval;
365: #endif
368: *found = PETSC_FALSE;
370: /* if file does not have an ftp:// or http:// or .gz then need not process file */
371: PetscStrstr(libname,".gz",&par);
372: if (par) {PetscStrlen(par,&len);}
374: PetscStrncmp(libname,"ftp://",6,&flg1);
375: PetscStrncmp(libname,"http://",7,&flg2);
376: PetscStrncmp(libname,"file://",7,&flg3);
377: if (!flg1 && !flg2 && !flg3 && (!par || len != 3)) {
378: PetscStrncpy(llibname,libname,llen);
379: PetscTestFile(libname,'r',found);
380: if (*found) {
381: PetscInfo1(NULL,"Found file %s\n",libname);
382: } else {
383: PetscInfo1(NULL,"Did not find file %s\n",libname);
384: }
385: return(0);
386: }
388: /* Determine if all processors share a common /tmp */
389: PetscSharedTmp(comm,&sharedtmp);
390: PetscOptionsGetenv(comm,"PETSC_TMP",tmpdir,PETSC_MAX_PATH_LEN,&flg1);
392: MPI_Comm_rank(comm,&rank);
393: if (!rank || !sharedtmp) {
395: /* Construct the script to get URL file */
396: PetscGetPetscDir(&pdir);
397: PetscStrcpy(urlget,pdir);
398: PetscStrcat(urlget,"/bin/urlget");
399: PetscTestFile(urlget,'r',&exists);
400: if (!exists) {
401: PetscTestFile("urlget",'r',&exists);
402: if (!exists) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot locate PETSc script urlget in %s or current directory",urlget);
403: PetscStrcpy(urlget,"urlget");
404: }
405: PetscStrcat(urlget," ");
407: /* are we using an alternative /tmp? */
408: if (flg1) {
409: PetscStrcat(urlget,"-tmp ");
410: PetscStrcat(urlget,tmpdir);
411: PetscStrcat(urlget," ");
412: }
414: PetscStrcat(urlget,libname);
415: PetscStrcat(urlget," 2>&1 ");
417: #if defined(PETSC_HAVE_POPEN)
418: PetscPOpen(PETSC_COMM_SELF,NULL,urlget,"r",&fp);
419: #else
420: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Cannot run external programs on this machine");
421: #endif
422: if (!fgets(buf,1024,fp)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"No output from ${PETSC_DIR}/bin/urlget in getting file %s",libname);
423: PetscInfo1(0,"Message back from urlget: %s\n",buf);
425: PetscStrncmp(buf,"Error",5,&flg1);
426: PetscStrncmp(buf,"Traceback",9,&flg2);
427: #if defined(PETSC_HAVE_POPEN)
428: PetscPClose(PETSC_COMM_SELF,fp,&rval);
429: #endif
430: if (flg1 || flg2) *found = PETSC_FALSE;
431: else {
432: *found = PETSC_TRUE;
434: /* Check for \n and make it 0 */
435: for (i=0; i<1024; i++) {
436: if (buf[i] == '\n') {
437: buf[i] = 0;
438: break;
439: }
440: }
441: PetscStrncpy(llibname,buf,llen);
442: }
443: }
444: if (sharedtmp) { /* send library name to all processors */
445: MPI_Bcast(found,1,MPIU_BOOL,0,comm);
446: if (*found) {
447: MPI_Bcast(llibname,llen,MPI_CHAR,0,comm);
448: MPI_Bcast(found,1,MPIU_BOOL,0,comm);
449: }
450: }
451: return(0);
452: }
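/*
   Illustrative usage sketch (not part of the original fretrieve.c): retrieve a
   possibly remote or gzipped file and report where the local uncompressed copy
   ended up.  The URL is a placeholder.
*/
PetscErrorCode ExampleRetrieve(MPI_Comm comm)
{
  PetscErrorCode ierr;
  char           local[PETSC_MAX_PATH_LEN];
  PetscBool      found;

  PetscFunctionBegin;
  ierr = PetscFileRetrieve(comm,"http://example.org/data/matrix.dat.gz",local,sizeof(local),&found);CHKERRQ(ierr);
  if (found) {
    ierr = PetscInfo1(NULL,"Local copy is %s\n",local);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(NULL,"File could not be retrieved\n");CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}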