Actual source code: ex42.c
petsc-3.8.4 2018-03-24
static char help[] = "Tests MatIncreaseOverlap() and MatCreateSubmatrices() for the parallel case.\n\
This example is similar to ex40.c; here the index sets used are random.\n\
Input arguments are:\n\
  -f <input_file> : file to load. For example see $PETSC_DIR/share/petsc/datafiles/matrices\n\
  -nd <size>      : > 0  number of domains per processor\n\
  -ov <overlap>   : >=0  amount of overlap between domains\n\n";
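/*
   Typical invocation (the matrix file name is a placeholder; any matrix
   stored in PETSc binary format will do):

       mpiexec -n 2 ./ex42 -f <binary_matrix_file> -nd 3 -ov 2
*/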
#include <petscmat.h>
int main(int argc,char **args)
{
  PetscErrorCode ierr;
  PetscInt       nd = 2,ov = 1,i,j,lsize,m,n,*idx,bs;
  PetscMPIInt    rank,size;
  PetscBool      flg;
  Mat            A,B,*submatA,*submatB;
  char           file[PETSC_MAX_PATH_LEN];
  PetscViewer    fd;
  IS             *is1,*is2;
  PetscRandom    r;
  PetscBool      test_unsorted = PETSC_FALSE;
  PetscScalar    rand;
  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
#if defined(PETSC_USE_COMPLEX)
  SETERRQ(PETSC_COMM_WORLD,1,"This example does not work with complex numbers");
#else
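  /* The random values drawn below are cast from PetscScalar to PetscInt to
     build block counts and indices; that conversion only makes sense for
     real scalars, hence the guard above. */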
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscOptionsGetString(NULL,NULL,"-f",file,PETSC_MAX_PATH_LEN,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-nd",&nd,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-ov",&ov,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-test_unsorted",&test_unsorted,NULL);CHKERRQ(ierr);
  /* Read matrix A (no RHS is loaded) */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATAIJ);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatLoad(A,fd);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);
  /* Read the same matrix as a sequential matrix B */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF,file,FILE_MODE_READ,&fd);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
  ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatSetFromOptions(B);CHKERRQ(ierr);
  ierr = MatLoad(B,fd);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);
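  /* A and B hold the same matrix: A is distributed across all processes,
     while B is a complete sequential copy on each process. The sequential
     copy lets every process extract reference submatrices locally and
     compare them against those obtained from the parallel matrix. */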
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  /* Create the random number generator */
  ierr = MatGetSize(A,&m,&n);CHKERRQ(ierr);
  ierr = PetscRandomCreate(PETSC_COMM_SELF,&r);CHKERRQ(ierr);
  ierr = PetscRandomSetFromOptions(r);CHKERRQ(ierr);
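  /* By default the generator produces values in [0,1), so the casts below
     always yield block counts and block indices that are in range. */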
  /* Create the IS corresponding to subdomains */
  ierr = PetscMalloc1(nd,&is1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nd,&is2);CHKERRQ(ierr);
  ierr = PetscMalloc1(m,&idx);CHKERRQ(ierr);
  for (i = 0; i < m; i++) idx[i] = i;
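  /* idx starts as the identity permutation of the m row indices; the
     block-wise shuffle below turns its leading entries into a random
     subdomain for each index set. */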
  /* Create the random index sets */
  for (i=0; i<nd; i++) {
    /* Skip a few random values so that the IS on different procs are different */
    for (j=0; j<rank; j++) {
      ierr = PetscRandomGetValue(r,&rand);CHKERRQ(ierr);
    }
    ierr  = PetscRandomGetValue(r,&rand);CHKERRQ(ierr);
    lsize = (PetscInt)(rand*(m/bs));
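    /* lsize is the number of bs-sized blocks in this subdomain, so the
       index sets created below contain lsize*bs entries. */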
    /* Partial Fisher-Yates shuffle of the bs-sized blocks of idx */
    for (j=0; j<lsize; j++) {
      PetscInt k,swap,l;

      ierr = PetscRandomGetValue(r,&rand);CHKERRQ(ierr);
      k    = j + (PetscInt)(rand*((m/bs)-j));
      for (l = 0; l < bs; l++) {
        swap        = idx[bs*j+l];
        idx[bs*j+l] = idx[bs*k+l];
        idx[bs*k+l] = swap;
      }
    }
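    /* The first lsize*bs entries of idx now form a randomly chosen,
       block-aligned subset of 0..m-1. */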
    if (!test_unsorted) {ierr = PetscSortInt(lsize*bs,idx);CHKERRQ(ierr);}
    ierr = ISCreateGeneral(PETSC_COMM_SELF,lsize*bs,idx,PETSC_COPY_VALUES,is1+i);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,lsize*bs,idx,PETSC_COPY_VALUES,is2+i);CHKERRQ(ierr);
    ierr = ISSetBlockSize(is1[i],bs);CHKERRQ(ierr);
    ierr = ISSetBlockSize(is2[i],bs);CHKERRQ(ierr);
  }
  if (!test_unsorted) {
    ierr = MatIncreaseOverlap(A,nd,is1,ov);CHKERRQ(ierr);
    ierr = MatIncreaseOverlap(B,nd,is2,ov);CHKERRQ(ierr);

    for (i=0; i<nd; ++i) {
      ierr = ISSort(is1[i]);CHKERRQ(ierr);
      ierr = ISSort(is2[i]);CHKERRQ(ierr);
    }
  }
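  /* In the sorted case MatIncreaseOverlap() has grown each index set by ov
     levels of overlap on both matrices; sorting afterwards puts both copies
     in the same order, so the submatrices extracted below can be compared
     entry for entry. */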
  ierr = MatCreateSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);CHKERRQ(ierr);
  ierr = MatCreateSubMatrices(B,nd,is2,is2,MAT_INITIAL_MATRIX,&submatB);CHKERRQ(ierr);
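  /* On every process, MatCreateSubMatrices() returns an array of nd
     sequential matrices whose rows and columns are given by the index sets. */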
  /* Now see if the serial and parallel cases give the same answers */
  for (i=0; i<nd; ++i) {
    ierr = MatEqual(submatA[i],submatB[i],&flg);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"proc:[%d], i=%D, flg =%d\n",rank,i,(int)flg);CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,stdout);CHKERRQ(ierr);
    if (!flg) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"%D-th parallel submatA != seq submatB",i);
  }
  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    ierr = ISDestroy(&is1[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&is2[i]);CHKERRQ(ierr);
  }
  ierr = MatDestroySubMatrices(nd,&submatA);CHKERRQ(ierr);
  ierr = MatDestroySubMatrices(nd,&submatB);CHKERRQ(ierr);
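  /* MatDestroySubMatrices() destroys the submatrices and frees the array
     allocated by MatCreateSubMatrices(). */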
  ierr = PetscRandomDestroy(&r);CHKERRQ(ierr);
  ierr = PetscFree(is1);CHKERRQ(ierr);
  ierr = PetscFree(is2);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = PetscFree(idx);CHKERRQ(ierr);
#endif
  ierr = PetscFinalize();
  return ierr;
}