Actual source code: ex42.c (petsc-3.7.3, 2016-08-01)
static char help[] = "Tests MatIncreaseOverlap() and MatGetSubMatrices() for the parallel case.\n\
This example is similar to ex40.c; here the index sets used are random.\n\
Input arguments are:\n\
  -f <input_file>  : file to load. For example see $PETSC_DIR/share/petsc/datafiles/matrices\n\
  -nd <size>       : > 0  number of domains per processor\n\
  -ov <overlap>    : >= 0 amount of overlap between domains\n\n";
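/*
   Example invocation (a sketch only; the matrix file name is a placeholder,
   any PETSc binary matrix such as those under $PETSC_DIR/share/petsc/datafiles/matrices works):

       mpiexec -n 2 ./ex42 -f <binary_matrix_file> -nd 4 -ov 2
*/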
#include <petscmat.h>

int main(int argc,char **args)
{
  PetscInt       nd = 2,ov=1,i,j,lsize,m,n,*idx,bs;
  PetscMPIInt    rank, size;
  PetscBool      flg;
  Mat            A,B,*submatA,*submatB;
  char           file[PETSC_MAX_PATH_LEN];
  PetscViewer    fd;
  IS             *is1,*is2;
  PetscRandom    r;
  PetscBool      test_unsorted = PETSC_FALSE;
  PetscScalar    rand;

  PetscInitialize(&argc,&args,(char*)0,help);
#if defined(PETSC_USE_COMPLEX)
  SETERRQ(PETSC_COMM_WORLD,1,"This example does not work with complex numbers");
#else

  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  PetscOptionsGetString(NULL,NULL,"-f",file,PETSC_MAX_PATH_LEN,NULL);
  PetscOptionsGetInt(NULL,NULL,"-nd",&nd,NULL);
  PetscOptionsGetInt(NULL,NULL,"-ov",&ov,NULL);
  PetscOptionsGetBool(NULL,NULL,"-test_unsorted",&test_unsorted,NULL);

  /* Read the matrix as a parallel MPIAIJ matrix */
  PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetType(A,MATMPIAIJ);
  MatSetFromOptions(A);
  MatLoad(A,fd);
  PetscViewerDestroy(&fd);

  /* Read the same matrix again as a sequential matrix on every process */
  PetscViewerBinaryOpen(PETSC_COMM_SELF,file,FILE_MODE_READ,&fd);
  MatCreate(PETSC_COMM_SELF,&B);
  MatSetType(B,MATSEQAIJ);
  MatSetFromOptions(B);
  MatLoad(B,fd);
  PetscViewerDestroy(&fd);
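
  /*
     A is distributed across all processes while B is a full sequential copy of the
     same matrix; later the submatrices extracted from the parallel A are compared
     against those extracted from the sequential B (see the MatEqual() loop below).
  */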
  MatGetBlockSize(A,&bs);

  /* Create the random number generator */
  MatGetSize(A,&m,&n);
  PetscRandomCreate(PETSC_COMM_SELF,&r);
  PetscRandomSetFromOptions(r);

  /* Create the IS corresponding to subdomains */
  PetscMalloc1(nd,&is1);
  PetscMalloc1(nd,&is2);
  PetscMalloc1(m,&idx);
  for (i = 0; i < m; i++) {idx[i] = i;}
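
  /* idx starts as the identity permutation 0..m-1; for each subdomain a prefix of it
     is shuffled (in blocks of size bs) and used as a random index set */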

  /* Create the random index sets */
  for (i=0; i<nd; i++) {
    /* Skip a few random values so that the IS on different processes are different */
    for (j=0; j<rank; j++) {
      PetscRandomGetValue(r,&rand);
    }
    PetscRandomGetValue(r,&rand);
    lsize = (PetscInt)(rand*(m/bs));
    /* Shuffle the first lsize blocks of idx (a blockwise partial Fisher-Yates shuffle) */
    for (j=0; j<lsize; j++) {
      PetscInt k, swap, l;

      PetscRandomGetValue(r,&rand);
      k = j + (PetscInt)(rand*((m/bs)-j));
      for (l = 0; l < bs; l++) {
        swap        = idx[bs*j+l];
        idx[bs*j+l] = idx[bs*k+l];
        idx[bs*k+l] = swap;
      }
    }
    if (!test_unsorted) {PetscSortInt(lsize*bs,idx);}
    ISCreateGeneral(PETSC_COMM_SELF,lsize*bs,idx,PETSC_COPY_VALUES,is1+i);
    ISCreateGeneral(PETSC_COMM_SELF,lsize*bs,idx,PETSC_COPY_VALUES,is2+i);
    ISSetBlockSize(is1[i],bs);
    ISSetBlockSize(is2[i],bs);
  }
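
  /*
     Increase the overlap of the (initially identical) index sets on the parallel
     matrix A and on the sequential matrix B, then sort them so that the submatrices
     extracted below use their rows/columns in the same order and can be compared
     directly with MatEqual().
  */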
  if (!test_unsorted) {
    MatIncreaseOverlap(A,nd,is1,ov);
    MatIncreaseOverlap(B,nd,is2,ov);

    for (i=0; i<nd; ++i) {
      ISSort(is1[i]);
      ISSort(is2[i]);
    }
  }
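
  /* Extract the subdomain submatrices from both the parallel and the sequential matrix */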
  MatGetSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);
  MatGetSubMatrices(B,nd,is2,is2,MAT_INITIAL_MATRIX,&submatB);

  /* Now see if the sequential and parallel cases give the same answers */
  for (i=0; i<nd; ++i) {
    MatEqual(submatA[i],submatB[i],&flg);
    PetscSynchronizedPrintf(PETSC_COMM_WORLD,"proc:[%d], i=%D, flg =%d\n",rank,i,(int)flg);
    PetscSynchronizedFlush(PETSC_COMM_WORLD,stdout);
  }

  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    ISDestroy(&is1[i]);
    ISDestroy(&is2[i]);
    MatDestroy(&submatA[i]);
    MatDestroy(&submatB[i]);
  }
  PetscFree(submatA);
  PetscFree(submatB);
  PetscRandomDestroy(&r);
  PetscFree(is1);
  PetscFree(is2);
  MatDestroy(&A);
  MatDestroy(&B);
  PetscFree(idx);

  PetscFinalize();
#endif
  return 0;
}