// Actual source code: aijAssemble.cu
// petsc-3.8.4 2018-03-24
1: #define PETSC_SKIP_COMPLEX
2: #define PETSC_SKIP_SPINLOCK
4: #include <petscconf.h>
5: #include <../src/mat/impls/aij/seq/aij.h>
6: #include <petscbt.h>
7: #include <../src/vec/vec/impls/dvecimpl.h>
8: #include <petsc/private/vecimpl.h>
9: #undef VecType
10: #include <../src/mat/impls/aij/seq/seqcusp/cuspmatimpl.h>
12: #include <thrust/reduce.h>
13: #include <thrust/inner_product.h>
15: #include <cusp/array1d.h>
16: #include <cusp/print.h>
17: #include <cusp/coo_matrix.h>
19: #include <cusp/io/matrix_market.h>
21: #include <thrust/iterator/counting_iterator.h>
22: #include <thrust/iterator/transform_iterator.h>
23: #include <thrust/iterator/permutation_iterator.h>
24: #include <thrust/functional.h>
26: // this example illustrates how to make repeated access to a range of values
27: // examples:
28: // repeated_range([0, 1, 2, 3], 1) -> [0, 1, 2, 3]
29: // repeated_range([0, 1, 2, 3], 2) -> [0, 0, 1, 1, 2, 2, 3, 3]
30: // repeated_range([0, 1, 2, 3], 3) -> [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
31: // ...
// repeated_range: a lazily-evaluated view that repeats every element of
// [first, last) `repeats` times, e.g. repeated_range([0,1,2,3], 2) ->
// [0,0,1,1,2,2,3,3].  Implemented as a permutation iterator whose index
// map sends output position i to source position i / repeats; no storage
// is allocated for the expanded sequence.
template <typename Iterator>
class repeated_range
{
public:
  typedef typename thrust::iterator_difference<Iterator>::type difference_type;

  // Index map for the permutation iterator: output index i reads source
  // element i / repeats.
  struct repeat_functor : public thrust::unary_function<difference_type,difference_type>
  {
    difference_type repeats;

    repeat_functor(difference_type repeats) : repeats(repeats) {}

    __host__ __device__
    difference_type operator()(const difference_type &i) const {
      return i / repeats;
    }
  };

  typedef typename thrust::counting_iterator<difference_type>                   CountingIterator;
  typedef typename thrust::transform_iterator<repeat_functor, CountingIterator> TransformIterator;
  typedef typename thrust::permutation_iterator<Iterator,TransformIterator>     PermutationIterator;

  // type of the repeated_range iterator
  typedef PermutationIterator iterator;

  // construct repeated_range for the range [first,last)
  // Initializer list follows the member declaration order (repeats, first,
  // last) to avoid -Wreorder warnings; semantics are unchanged since each
  // member is initialized from its own constructor argument.
  repeated_range(Iterator first, Iterator last, difference_type repeats) : repeats(repeats), first(first), last(last) {}

  iterator begin(void) const
  {
    return PermutationIterator(first, TransformIterator(CountingIterator(0), repeat_functor(repeats)));
  }

  // One past the end of the view: it has repeats * (last - first) elements.
  iterator end(void) const
  {
    return begin() + repeats * (last - first);
  }

protected:
  difference_type repeats;
  Iterator first;
  Iterator last;
};
79: // this example illustrates how to repeat blocks in a range multiple times
80: // examples:
81: // tiled_range([0, 1, 2, 3], 2) -> [0, 1, 2, 3, 0, 1, 2, 3]
// tiled_range([0, 1, 2, 3], 2, 4) -> [0, 1, 2, 3, 0, 1, 2, 3]
83: // tiled_range([0, 1, 2, 3], 2, 2) -> [0, 1, 0, 1, 2, 3, 2, 3]
// tiled_range([0, 1, 2, 3], 3, 2) -> [0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3]
85: // ...
// tiled_range: a lazily-evaluated view that repeats consecutive blocks
// ("tiles") of tile_size elements of [first, last) `repeats` times each.
// With the default tile size (the whole range) this simply tiles the full
// range `repeats` times.  Implemented as a permutation iterator; no storage
// is allocated for the expanded sequence.
template <typename Iterator>
class tiled_range
{
public:
  typedef typename thrust::iterator_difference<Iterator>::type difference_type;

  // Index map: each run of tile_size*repeats output positions reads the same
  // source tile, cycling through its tile_size elements.
  struct tile_functor : public thrust::unary_function<difference_type,difference_type>
  {
    difference_type repeats;
    difference_type tile_size;

    // Initializer list follows the member declaration order
    // (repeats, tile_size) to avoid -Wreorder warnings.
    tile_functor(difference_type repeats, difference_type tile_size) : repeats(repeats), tile_size(tile_size) {}

    __host__ __device__
    difference_type operator()(const difference_type &i) const {
      return tile_size * (i / (tile_size * repeats)) + i % tile_size;
    }
  };

  typedef typename thrust::counting_iterator<difference_type>                 CountingIterator;
  typedef typename thrust::transform_iterator<tile_functor, CountingIterator> TransformIterator;
  typedef typename thrust::permutation_iterator<Iterator,TransformIterator>   PermutationIterator;

  // type of the tiled_range iterator
  typedef PermutationIterator iterator;

  // construct tiled_range for the range [first,last); the whole range is one
  // tile.  Initializer lists below follow the member declaration order
  // (repeats, tile_size, first, last) to avoid -Wreorder warnings.
  tiled_range(Iterator first, Iterator last, difference_type repeats)
    : repeats(repeats), tile_size(last - first), first(first), last(last) {}

  tiled_range(Iterator first, Iterator last, difference_type repeats, difference_type tile_size)
    : repeats(repeats), tile_size(tile_size), first(first), last(last)
  {
    // ASSERT((last - first) % tile_size == 0)
  }

  iterator begin(void) const
  {
    return PermutationIterator(first, TransformIterator(CountingIterator(0), tile_functor(repeats, tile_size)));
  }

  // One past the end of the view: it has repeats * (last - first) elements.
  iterator end(void) const
  {
    return begin() + repeats * (last - first);
  }

protected:
  difference_type repeats;
  difference_type tile_size;
  Iterator first;
  Iterator last;
};
// Shorthand types for the GPU-side assembly below: device-resident CUSP
// arrays of matrix indices and PETSc scalar values.
typedef cusp::device_memory memSpace;
typedef int IndexType;
typedef PetscScalar ValueType;
typedef cusp::array1d<IndexType, memSpace> IndexArray;
typedef cusp::array1d<ValueType, memSpace> ValueArray;
typedef IndexArray::iterator IndexArrayIterator;
typedef ValueArray::iterator ValueArrayIterator;
149: // Ne: Number of elements
150: // Nl: Number of dof per element
// Assemble Ne dense Nl x Nl element matrices (values in elemMats, global
// row/column indices in elemRows) into the sparse matrix J on the GPU:
//   1. expand (elemRows, elemMats) into a "fat" COO matrix with one entry
//      per element-matrix value (repeated/tiled index views avoid host work),
//   2. sort the COO entries by (i,j),
//   3. compress duplicate (i,j) entries by summing their values,
//   4. convert to CSR and copy the result into J.
// All PETSc calls are error-checked with CHKERRQ per PETSc convention.
PetscErrorCode MatSetValuesBatch_SeqAIJCUSP(Mat J, PetscInt Ne, PetscInt Nl, PetscInt *elemRows, const PetscScalar *elemMats)
{
  size_t         N  = Ne * Nl;    // total number of element rows
  size_t         No = Ne * Nl*Nl; // total number of element-matrix values
  PetscInt       Nr;              // Number of rows
  PetscErrorCode ierr;

  PetscFunctionBegin;
  // copy elemRows and elemMat to device
  IndexArray d_elemRows(elemRows, elemRows + N);
  ValueArray d_elemMats(elemMats, elemMats + No);

  ierr = MatGetSize(J, &Nr, NULL);CHKERRQ(ierr);
  // allocate storage for "fat" COO representation of matrix
  ierr = PetscInfo1(J, "Making COO matrix of size %d\n", Nr);CHKERRQ(ierr);
  cusp::coo_matrix<IndexType,ValueType, memSpace> COO(Nr, Nr, No);

  // repeat elemRows entries Nl times: row index of each value of a
  // row-major element matrix
  ierr = PetscInfo(J, "Making row indices\n");CHKERRQ(ierr);
  repeated_range<IndexArrayIterator> rowInd(d_elemRows.begin(), d_elemRows.end(), Nl);
  thrust::copy(rowInd.begin(), rowInd.end(), COO.row_indices.begin());

  // tile rows of elemRows Nl times: column index of each value
  ierr = PetscInfo(J, "Making column indices\n");CHKERRQ(ierr);
  tiled_range<IndexArrayIterator> colInd(d_elemRows.begin(), d_elemRows.end(), Nl, Nl);
  thrust::copy(colInd.begin(), colInd.end(), COO.column_indices.begin());

  // copy values from elemMats into COO structure (could be avoided)
  thrust::copy(d_elemMats.begin(), d_elemMats.end(), COO.values.begin());

  // For MPIAIJ, split this into two COO matrices, and return both
  //   Need the column map

  // print the "fat" COO representation
#if !defined(PETSC_USE_COMPLEX)
  if (PetscLogPrintInfo) cusp::print(COO);
#endif

  // sort COO format by (i,j), this is the most costly step
  ierr = PetscInfo(J, "Sorting rows and columns\n");CHKERRQ(ierr);
#if 1
  COO.sort_by_row_and_column();
#else
  // Alternative hand-rolled sort kept for reference (dead code): build an
  // explicit permutation by stable-sorting columns then rows, and apply it.
  {
    ierr = PetscInfo(J, " Making permutation\n");CHKERRQ(ierr);
    IndexArray permutation(No);
    thrust::sequence(permutation.begin(), permutation.end());

    // compute permutation and sort by (I,J)
    {
      ierr = PetscInfo(J, " Sorting columns\n");CHKERRQ(ierr);
      IndexArray temp(No);
      thrust::copy(COO.column_indices.begin(), COO.column_indices.end(), temp.begin());
      thrust::stable_sort_by_key(temp.begin(), temp.end(), permutation.begin());
      ierr = PetscInfo(J, " Sorted columns\n");CHKERRQ(ierr);
      if (PetscLogPrintInfo) {
        for (IndexArrayIterator t_iter = temp.begin(), p_iter = permutation.begin(); t_iter != temp.end(); ++t_iter, ++p_iter) {
          ierr = PetscInfo2(J, "%d(%d)\n", *t_iter, *p_iter);CHKERRQ(ierr);
        }
      }

      ierr = PetscInfo(J, " Copying rows\n");CHKERRQ(ierr);
      //cusp::copy(COO.row_indices, temp);
      thrust::copy(COO.row_indices.begin(), COO.row_indices.end(), temp.begin());
      ierr = PetscInfo(J, " Gathering rows\n");CHKERRQ(ierr);
      thrust::gather(permutation.begin(), permutation.end(), temp.begin(), COO.row_indices.begin());
      ierr = PetscInfo(J, " Sorting rows\n");CHKERRQ(ierr);
      thrust::stable_sort_by_key(COO.row_indices.begin(), COO.row_indices.end(), permutation.begin());

      ierr = PetscInfo(J, " Gathering columns\n");CHKERRQ(ierr);
      cusp::copy(COO.column_indices, temp);
      thrust::gather(permutation.begin(), permutation.end(), temp.begin(), COO.column_indices.begin());
    }

    // use permutation to reorder the values
    {
      ierr = PetscInfo(J, " Sorting values\n");CHKERRQ(ierr);
      ValueArray temp(COO.values);
      cusp::copy(COO.values, temp);
      thrust::gather(permutation.begin(), permutation.end(), temp.begin(), COO.values.begin());
    }
  }
#endif

  // print the sorted "fat" COO representation
#if !defined(PETSC_USE_COMPLEX)
  if (PetscLogPrintInfo) cusp::print(COO);
#endif

  // compute number of unique (i,j) entries
  //   this counts the number of changes as we move along the (i,j) list
  ierr = PetscInfo(J, "Computing number of unique entries\n");CHKERRQ(ierr);
  size_t num_entries = thrust::inner_product
                         (thrust::make_zip_iterator(thrust::make_tuple(COO.row_indices.begin(), COO.column_indices.begin())),
                          thrust::make_zip_iterator(thrust::make_tuple(COO.row_indices.end(),   COO.column_indices.end())) - 1,
                          thrust::make_zip_iterator(thrust::make_tuple(COO.row_indices.begin(), COO.column_indices.begin())) + 1,
                          size_t(1),
                          thrust::plus<size_t>(),
                          thrust::not_equal_to< thrust::tuple<IndexType,IndexType> >());

  // allocate COO storage for final matrix
  ierr = PetscInfo(J, "Allocating compressed matrix\n");CHKERRQ(ierr);
  cusp::coo_matrix<IndexType, ValueType, memSpace> A(Nr, Nr, num_entries);

  // sum values with the same (i,j) index
  // XXX thrust::reduce_by_key is unoptimized right now, so we provide a SpMV-based one in cusp::detail
  //     the Cusp one is 2x faster, but still not optimal
  //     This could possibly be done in-place
  ierr = PetscInfo(J, "Compressing matrix\n");CHKERRQ(ierr);
  thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(COO.row_indices.begin(), COO.column_indices.begin())),
                        thrust::make_zip_iterator(thrust::make_tuple(COO.row_indices.end(), COO.column_indices.end())),
                        COO.values.begin(),
                        thrust::make_zip_iterator(thrust::make_tuple(A.row_indices.begin(), A.column_indices.begin())),
                        A.values.begin(),
                        thrust::equal_to< thrust::tuple<IndexType,IndexType> >(),
                        thrust::plus<ValueType>());

  // print the final matrix
#if !defined(PETSC_USE_COMPLEX)
  if (PetscLogPrintInfo) cusp::print(A);
#endif
  //std::cout << "Writing matrix" << std::endl;
  //cusp::io::write_matrix_market_file(A, "A.mtx");

  ierr = PetscInfo(J, "Converting to PETSc matrix\n");CHKERRQ(ierr);
  ierr = MatSetType(J, MATSEQAIJCUSP);CHKERRQ(ierr);
  //cusp::csr_matrix<PetscInt,PetscScalar,cusp::device_memory> Jgpu;
  // Convert the compressed COO matrix to CSR; ownership of Jgpu is handed to
  // MatCUSPCopyFromGPU, which populates J from it.
  CUSPMATRIX *Jgpu = new CUSPMATRIX;
  cusp::convert(A, *Jgpu);
#if !defined(PETSC_USE_COMPLEX)
  if (PetscLogPrintInfo) cusp::print(*Jgpu);
#endif
  ierr = PetscInfo(J, "Copying to CPU matrix\n");CHKERRQ(ierr);
  ierr = MatCUSPCopyFromGPU(J, Jgpu);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}