Actual source code: bjkokkoskernels.kokkos.cxx
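Batched-GMRES backend of the PCBJKOKKOS preconditioner: each diagonal block system is packed into Kokkos views and the whole batch is solved with KokkosKernels' TeamVectorGMRES, one team per group of systems that share a sparsity graph.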
#include <petsc/private/pcbjkokkosimpl.h>

#if defined(PETSC_HAVE_KOKKOS_KERNELS_BATCH)
#include <fstream>

#include "Kokkos_Timer.hpp"
#include "Kokkos_Random.hpp"
#include "Kokkos_UnorderedMap.hpp"
#include "Kokkos_Sort.hpp"

/// KokkosKernels headers
#include "KokkosBatched_Util.hpp"
#include "KokkosBatched_Vector.hpp"

#include <Kokkos_ArithTraits.hpp>
#include <KokkosBatched_Copy_Decl.hpp>
#include <KokkosBatched_Copy_Impl.hpp>
#include <KokkosBatched_AddRadial_Decl.hpp>
#include <KokkosBatched_AddRadial_Impl.hpp>
#include <KokkosBatched_Gemm_Decl.hpp>
#include <KokkosBatched_Gemm_Serial_Impl.hpp>
#include <KokkosBatched_Gemm_Team_Impl.hpp>
#include <KokkosBatched_Gemv_Decl.hpp>
#include <KokkosBatched_Gemv_Team_Impl.hpp>
#include <KokkosBatched_Trsm_Decl.hpp>
#include <KokkosBatched_Trsm_Serial_Impl.hpp>
#include <KokkosBatched_Trsm_Team_Impl.hpp>
#include <KokkosBatched_Trsv_Decl.hpp>
#include <KokkosBatched_Trsv_Serial_Impl.hpp>
#include <KokkosBatched_Trsv_Team_Impl.hpp>
#include <KokkosBatched_LU_Decl.hpp>
#include <KokkosBatched_LU_Serial_Impl.hpp>
#include <KokkosBatched_LU_Team_Impl.hpp>
#include <KokkosSparse_CrsMatrix.hpp>
#include "KokkosBatched_Spmv.hpp"
#include "KokkosBatched_CrsMatrix.hpp"
#include "KokkosBatched_Krylov_Handle.hpp"
#include "KokkosBatched_GMRES.hpp"
#include "KokkosBatched_JacobiPrec.hpp"
template <typename DeviceType, typename ValuesViewType, typename IntView, typename VectorViewType, typename KrylovHandleType>
struct Functor_TestBatchedTeamVectorGMRES {
  const ValuesViewType _D;    // batched matrix values, one row per system
  const ValuesViewType _diag; // inverse diagonals for the Jacobi preconditioner
  const IntView        _r;    // CSR row offsets, one row per shared graph
  const IntView        _c;    // CSR column indices, one row per shared graph
  const VectorViewType _X;    // solutions
  const VectorViewType _B;    // right-hand sides
  const int            _N_team, _team_size, _vector_length;
  const int            _N_iteration;
  const double         _tol;
  const int            _ortho_strategy;
  const int            _scratch_pad_level;
  KrylovHandleType     _handle;

  // Constructor without the Jacobi diagonal (unpreconditioned path; _diag is left empty)
  KOKKOS_INLINE_FUNCTION
  Functor_TestBatchedTeamVectorGMRES(const ValuesViewType &D, const IntView &r, const IntView &c, const VectorViewType &X, const VectorViewType &B, const int N_team, const int team_size, const int vector_length, const int N_iteration, const double tol, const int ortho_strategy, const int scratch_pad_level, KrylovHandleType &handle) :
    _D(D), _r(r), _c(c), _X(X), _B(B), _N_team(N_team), _team_size(team_size), _vector_length(vector_length), _N_iteration(N_iteration), _tol(tol), _ortho_strategy(ortho_strategy), _scratch_pad_level(scratch_pad_level), _handle(handle)
  {
  }

  // Constructor with the (inverse) diagonal used by the Jacobi preconditioner
  KOKKOS_INLINE_FUNCTION
  Functor_TestBatchedTeamVectorGMRES(const ValuesViewType &D, const ValuesViewType &diag, const IntView &r, const IntView &c, const VectorViewType &X, const VectorViewType &B, const int N_team, const int team_size, const int vector_length, const int N_iteration, const double tol, const int ortho_strategy, const int scratch_pad_level, KrylovHandleType &handle) :
    _D(D), _diag(diag), _r(r), _c(c), _X(X), _B(B), _N_team(N_team), _team_size(team_size), _vector_length(vector_length), _N_iteration(N_iteration), _tol(tol), _ortho_strategy(ortho_strategy), _scratch_pad_level(scratch_pad_level), _handle(handle)
  {
  }
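
  // Each team solves systems [first_matrix, last_matrix). The row offsets and
  // column indices are staged in level-1 team scratch (they are shared by
  // every system on the graph), along with a per-system copy of the Jacobi
  // diagonal, before the batched GMRES kernel is invoked.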
  template <typename MemberType>
  KOKKOS_INLINE_FUNCTION void operator()(const MemberType &member) const
  {
    const int first_matrix = static_cast<int>(member.league_rank()) * _N_team;
    const int N            = _D.extent(0);
    const int last_matrix  = (static_cast<int>(member.league_rank() + 1) * _N_team < N ? static_cast<int>(member.league_rank() + 1) * _N_team : N);
    const int graphID      = static_cast<int>(member.league_rank());
    using TeamVectorCopy1D = KokkosBatched::TeamVectorCopy<MemberType, KokkosBatched::Trans::NoTranspose, 1>;

    auto d = Kokkos::subview(_D, Kokkos::make_pair(first_matrix, last_matrix), Kokkos::ALL);
    auto x = Kokkos::subview(_X, Kokkos::make_pair(first_matrix, last_matrix), Kokkos::ALL);
    auto b = Kokkos::subview(_B, Kokkos::make_pair(first_matrix, last_matrix), Kokkos::ALL);
    using ScratchPadIntViewType    = Kokkos::View<typename IntView::non_const_value_type *, typename IntView::array_layout, typename IntView::execution_space::scratch_memory_space>;
    using ScratchPadValuesViewType = Kokkos::View<typename ValuesViewType::non_const_value_type **, typename ValuesViewType::array_layout, typename ValuesViewType::execution_space::scratch_memory_space>;

    using Operator = KokkosBatched::CrsMatrix<ValuesViewType, ScratchPadIntViewType>;
    ScratchPadIntViewType r(member.team_scratch(1), _r.extent(1));
    ScratchPadIntViewType c(member.team_scratch(1), _c.extent(1));

    TeamVectorCopy1D::invoke(member, Kokkos::subview(_r, graphID, Kokkos::ALL), r);
    TeamVectorCopy1D::invoke(member, Kokkos::subview(_c, graphID, Kokkos::ALL), c);
    Operator A(d, r, c);

    ScratchPadValuesViewType diag(member.team_scratch(1), last_matrix - first_matrix, _diag.extent(1));
    using PrecOperator = KokkosBatched::JacobiPrec<ScratchPadValuesViewType>;

    KokkosBatched::TeamVectorCopy<MemberType>::invoke(member, Kokkos::subview(_diag, Kokkos::make_pair(first_matrix, last_matrix), Kokkos::ALL), diag);
    PrecOperator P(diag);
    P.setComputedInverse(); // the diagonal was already inverted on the PETSc side

    KokkosBatched::TeamVectorGMRES<MemberType>::template invoke<Operator, VectorViewType, PrecOperator, KrylovHandleType>(member, A, b, x, P, _handle);
  }
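
  // Launch helper: picks an auto-sized TeamPolicy unless a team size was
  // requested, configures the Krylov handle, sizes the two scratch levels to
  // match the views used in operator(), and times the solve.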
  inline double run(PC pc)
  {
    std::string   name("KokkosBatched::Test::TeamVectorGMRES");
    Kokkos::Timer timer;
    Kokkos::Profiling::pushRegion(name.c_str());

    Kokkos::TeamPolicy<DeviceType> auto_policy(ceil(1. * _D.extent(0) / _N_team), Kokkos::AUTO(), Kokkos::AUTO());
    Kokkos::TeamPolicy<DeviceType> tuned_policy(ceil(1. * _D.extent(0) / _N_team), _team_size, _vector_length);
    Kokkos::TeamPolicy<DeviceType> policy;

    if (_team_size < 1) policy = auto_policy;
    else policy = tuned_policy;

    _handle.set_max_iteration(_N_iteration);
    _handle.set_tolerance(_tol);
    _handle.set_ortho_strategy(_ortho_strategy);
    _handle.set_scratch_pad_level(_scratch_pad_level);
    _handle.set_compute_last_residual(true);

    int maximum_iteration = _handle.get_max_iteration();

    using ScalarType = typename ValuesViewType::non_const_value_type;
    using Layout     = typename ValuesViewType::array_layout;
    using EXSP       = typename ValuesViewType::execution_space;

    using ViewType2D    = Kokkos::View<ScalarType **, Layout, EXSP>;
    using IntViewType1D = Kokkos::View<PetscInt *, Layout, EXSP>;

    // Scratch requirements: level 0 holds the small per-team temporaries used
    // by the GMRES kernel, level 1 holds the graph (row offsets + column
    // indices) and the Jacobi diagonal staged in operator()
    size_t bytes_1D      = ViewType2D::shmem_size(_N_team, 1);
    size_t bytes_row_ptr = IntViewType1D::shmem_size(_r.extent(1));
    size_t bytes_col_idc = IntViewType1D::shmem_size(_c.extent(1));
    size_t bytes_2D_1    = ViewType2D::shmem_size(_N_team, _X.extent(1));
    size_t bytes_2D_2    = ViewType2D::shmem_size(_N_team, maximum_iteration + 1);

    size_t bytes_diag = bytes_2D_1;
    size_t bytes_tmp  = 2 * bytes_2D_1 + 2 * bytes_1D + bytes_2D_2;

    policy.set_scratch_size(0, Kokkos::PerTeam(bytes_tmp));
    policy.set_scratch_size(1, Kokkos::PerTeam(bytes_col_idc + bytes_row_ptr + bytes_diag));
    PetscCall(PetscInfo(pc, "%d scratch memory(0) = %d + %d + %d bytes_diag=%d; %d scratch memory(1); %d maximum_iterations\n", (int)(bytes_tmp), 2 * (int)bytes_2D_1, 2 * (int)bytes_1D, (int)bytes_2D_2, (int)bytes_diag, (int)(bytes_row_ptr + bytes_col_idc + bytes_diag), (int)maximum_iteration));
    exec_space().fence();
    timer.reset();
    Kokkos::parallel_for(name.c_str(), policy, *this);
    exec_space().fence();
    double sec = timer.seconds();
    Kokkos::Profiling::popRegion(); // balance the pushRegion above

    return sec;
  }
};
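
// Apply the batched GMRES solver: gather the PETSc AIJ matrix and RHS into
// block-major batch views (building the shared CSR graph on the first call),
// run the functor above, scatter the solutions back into PETSc ordering, and
// report per-system convergence through the Krylov handle.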
PETSC_INTERN PetscErrorCode PCApply_BJKOKKOSKERNELS(PC pc, const PetscScalar *glb_bdata, PetscScalar *glb_xdata, const PetscInt *glb_Aai, const PetscInt *glb_Aaj, const PetscScalar *glb_Aaa, const PetscInt team_size, MatInfo info, const PetscInt batch_sz, PCFailedReason *pcreason)
{
  PC_PCBJKOKKOS     *jac          = (PC_PCBJKOKKOS *)pc->data;
  Mat                A            = pc->pmat;
  const PetscInt     maxit        = jac->ksp->max_it, nBlk = jac->nBlocks;
  const int          Nsolves      = nBlk;
  int                Nsolves_team = jac->nsolves_team, fill_idx = 0;
  int                Nloc         = jac->const_block_size;       // assumes all blocks (grids) have the same size
  const int          nnz          = (int)info.nz_used / Nsolves; // TODO: fix for variable grid size
  PetscReal          rtol         = jac->ksp->rtol;
  const PetscScalar *glb_idiag    = jac->d_idiag_k->data();
  const PetscInt    *d_bid_eqOffset = jac->d_bid_eqOffset_k->data();
  const PetscInt    *d_isicol = jac->d_isicol_k->data(), *d_isrow = jac->d_isrow_k->data();

  PetscFunctionBegin;
  if (Nsolves_team > batch_sz) Nsolves_team = batch_sz; // silently clamp to the batch size
  PetscCheck(jac->const_block_size, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "Kokkos (GMRES) solver requires constant block size (but can be made to work with species ordering or N_team==1)");
  PetscCheck(Nsolves % Nsolves_team == 0, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "Nsolves.mod(Nsolves_team) != 0: Nsolves = %d, Nsolves_team = %d", Nsolves, Nsolves_team);
  PetscCheck(((int)info.nz_used) % Nsolves == 0, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "info.nz_used.mod(Nsolves) != 0: info.nz_used = %g, Nsolves = %d", info.nz_used, Nsolves);
#if defined(PETSC_HAVE_CUDA)
  nvtxRangePushA("gmres-kk");
#endif
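  // Wrap the precomputed inverse diagonal (already on the device) in an
  // unmanaged 2D view: one row of length Nloc per block system, no copy made.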
  Kokkos::View<PetscScalar **, layout, exec_space, Kokkos::MemoryTraits<Kokkos::Unmanaged>> inv_diag((PetscScalar *)glb_idiag, Nsolves, Nloc); // already in solver (block) ordering
  if (!jac->rowOffsets) { // first call: allocate batch views; the graph is built below
    jac->rowOffsets   = new IntView("rowOffsets", Nsolves / Nsolves_team, Nloc + 1); // one shared graph per team group (same grids)
    jac->colIndices   = new IntView("colIndices", Nsolves / Nsolves_team, nnz);
    jac->batch_b      = new XYType("batch rhs", Nsolves, Nloc);
    jac->batch_x      = new XYType("batch sol", Nsolves, Nloc);
    jac->batch_values = new AMatrixValueView("batch values", Nsolves, nnz);
    fill_idx          = 1;
    PetscCall(PetscInfo(pc, "Setup indices Nloc=%d, nnz=%d\n", Nloc, nnz));
  }
  IntView          &rowOffsets   = *jac->rowOffsets;
  IntView          &colIndices   = *jac->colIndices;
  XYType           &batch_b      = *jac->batch_b;
  XYType           &batch_x      = *jac->batch_x;
  AMatrixValueView &batch_values = *jac->batch_values;

  Kokkos::deep_copy(batch_x, 0.); // zero initial guess
  PetscCall(PetscInfo(pc, "\tjac->n = %d, Nloc = %d, Nsolves = %d, nnz = %d, Nsolves_team = %d, league size = %d, maxit = %d\n", (int)jac->n, Nloc, Nsolves, nnz, Nsolves_team, Nsolves / Nsolves_team, (int)maxit));
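  // Kernel 1: for each block, gather the RHS into block-major batch_b and, on
  // the first call, record each (permuted) row's nonzero count in rowOffsets;
  // only the first block of each team group writes the counts, since all of
  // its blocks share the same sparsity graph.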
  Kokkos::parallel_for(
    "rowOffsets+map", Kokkos::TeamPolicy<>(Nsolves, team_size, PCBJKOKKOS_VEC_SIZE), KOKKOS_LAMBDA(const team_member team) {
      const int blkID = team.league_rank(), start = d_bid_eqOffset[blkID], end = d_bid_eqOffset[blkID + 1];
      if (fill_idx) {
        if (blkID % Nsolves_team == 0) { // first matrix on this member
          Kokkos::parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](const int rowb) { // Nloc rows
            int rowa = d_isicol[rowb];
            int n    = glb_Aai[rowa + 1] - glb_Aai[rowa];
            rowOffsets(blkID / Nsolves_team, rowb + 1 - start) = n; // save row sizes; scanned into offsets below
          });
        }
      }
      // map b into field-major (block) ordering
      Kokkos::parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](int rowb) {
        int rowa = d_isicol[rowb];
        batch_b(blkID, rowb - start) = glb_bdata[rowa];
      });
    });
  Kokkos::fence();
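  // Kernel 2 (first call only): an in-place scan turns the per-row counts
  // into CSR row offsets, e.g. counts [3, 2, 4] stored at entries 1..Nloc
  // with rowOffsets(g,0) = 0 become offsets [0, 3, 5, 9]. One single-thread
  // team per graph runs the short serial scan.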
  if (fill_idx) {
    Kokkos::parallel_for(
      "prefix sum", Kokkos::TeamPolicy<>(Nsolves / Nsolves_team, 1, 1), KOKKOS_LAMBDA(const team_member team) {
        const int graphID      = team.league_rank();
        rowOffsets(graphID, 0) = 0;
        for (int i = 0; i < Nloc; ++i) rowOffsets(graphID, i + 1) += rowOffsets(graphID, i);
      });
    Kokkos::fence();
  }
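  // Kernel 3: copy the matrix values into the batch view and, on the first
  // call, build block-local column indices: d_isrow maps the global column to
  // the permuted ordering, and subtracting blkID * Nloc shifts it into the
  // block's local numbering.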
  Kokkos::parallel_for(
    "copy matrix", Kokkos::TeamPolicy<>(Nsolves /* /batch_sz */, team_size, PCBJKOKKOS_VEC_SIZE), KOKKOS_LAMBDA(const team_member team) {
      const int blkID = team.league_rank(), start = d_bid_eqOffset[blkID], end = d_bid_eqOffset[blkID + 1], graphID = blkID / Nsolves_team;
      Kokkos::parallel_for(Kokkos::TeamThreadRange(team, start, end), [=](const int rowb) {
        int                rowa = d_isicol[rowb];
        int                n    = glb_Aai[rowa + 1] - glb_Aai[rowa];
        const PetscInt    *aj   = glb_Aaj + glb_Aai[rowa]; // global column indices
        const PetscScalar *aa   = glb_Aaa + glb_Aai[rowa];
        Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, n), [=](const int &i) {
          PetscScalar val = aa[i];
          if (fill_idx && blkID % Nsolves_team == 0) colIndices(graphID, rowOffsets(graphID, rowb - start) + i) = d_isrow[aj[i]] - blkID * Nloc; // local = permuted global - block start
          batch_values(blkID, rowOffsets(graphID, rowb - start) + i) = val;
        });
      });
    });
  Kokkos::fence();
  // Set up the batched Krylov solver: the handle tracks residual norms,
  // iteration counts, and convergence per system, and owns the Arnoldi workspace
  using ScalarType       = typename AMatrixValueView::non_const_value_type;
  using MagnitudeType    = typename Kokkos::Details::ArithTraits<ScalarType>::mag_type;
  using Norm2DViewType   = Kokkos::View<MagnitudeType **, layout, exec_space>;
  using Scalar3DViewType = Kokkos::View<ScalarType ***, layout, exec_space>;
  using IntViewType      = Kokkos::View<int *, layout, exec_space>;
  using KrylovHandleType = KokkosBatched::KrylovHandle<Norm2DViewType, IntViewType, Scalar3DViewType>;
  const int    n_iterations   = maxit;
  const int    vector_length  = -1; // only used when an explicit team size is given
  const double tol            = rtol;
  const int    ortho_strategy = 0;
  KrylovHandleType handle(Nsolves, Nsolves_team, n_iterations, true);
  handle.Arnoldi_view = Scalar3DViewType("Arnoldi view", Nsolves, n_iterations, Nloc + n_iterations + 3);
  // solve; team_size = -1 selects the auto-sized TeamPolicy in run()
  Functor_TestBatchedTeamVectorGMRES<exec_space, AMatrixValueView, IntView, XYType, KrylovHandleType>(batch_values, inv_diag, rowOffsets, colIndices, batch_x, batch_b, Nsolves_team, -1, vector_length, n_iterations, tol, ortho_strategy, 0, handle).run(pc);
  Kokkos::fence();
  // Get the data back: scatter the batch solutions into PETSc (global) ordering
  Kokkos::parallel_for(
    "map", Kokkos::TeamPolicy<>(Nsolves /* /batch_sz */, -1, PCBJKOKKOS_VEC_SIZE), KOKKOS_LAMBDA(const team_member team) {
      const int blkID = team.league_rank(), start = d_bid_eqOffset[blkID], end = d_bid_eqOffset[blkID + 1];
      // map x into Plex/PETSc ordering
      Kokkos::parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](int rowb) {
        int rowa        = d_isicol[rowb];
        glb_xdata[rowa] = batch_x(blkID, rowb - start);
      });
    });
  // Output, assuming species-major ordering (cloned from the Kokkos solver path)
#if PCBJKOKKOS_VERBOSE_LEVEL >= 3
#if PCBJKOKKOS_VERBOSE_LEVEL >= 4
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "Iterations\n"));
#else
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "max iterations per species (gmres) :"));
#endif
  for (PetscInt dmIdx = 0, s = 0, head = 0; dmIdx < jac->num_dms; dmIdx += batch_sz) {
    for (PetscInt f = 0, idx = head; f < jac->dm_Nf[dmIdx]; f++, s++, idx++) {
#if PCBJKOKKOS_VERBOSE_LEVEL >= 4
      PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%2" PetscInt_FMT ":", s));
      for (int bid = 0; bid < batch_sz; bid++) PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%3d ", handle.get_iteration_host(idx + bid * jac->dm_Nf[dmIdx])));
      PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "\n"));
#else
      int count = 0, ii;
      for (int bid = 0; bid < batch_sz; bid++) {
        if ((ii = handle.get_iteration_host(idx + bid * jac->dm_Nf[dmIdx])) > count) count = ii;
      }
      PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%3d", count));
#endif
    }
    head += batch_sz * jac->dm_Nf[dmIdx];
  }
#if PCBJKOKKOS_VERBOSE_LEVEL == 3
  PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "\n"));
#endif
#endif
  // Return the error code and record the maximum iteration count
  PetscInt count = 0, mbid = 0;
  if (handle.is_converged_host()) {
    *pcreason = PC_NOERROR;
    if (!jac->max_nits) { // first solve: record the worst-case iteration count
      for (int blkID = 0; blkID < nBlk; blkID++) {
        if (handle.get_iteration_host(blkID) > jac->max_nits) {
          jac->max_nits = handle.get_iteration_host(blkID);
          mbid          = blkID;
        }
      }
    }
  } else {
    PetscCall(PetscPrintf(PETSC_COMM_SELF, "There is at least one system that did not converge.\n"));
    *pcreason = PC_SUBPC_ERROR;
  }
  // Report results, assuming species-major ordering
  for (int blkID = 0; blkID < nBlk; blkID++) {
    if (jac->reason) { // -pc_bjkokkos_ksp_converged_reason
      if (jac->batch_target == blkID) {
        if (batch_sz != 1)
          PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), " Linear solve %s in %d iterations, batch %" PetscInt_FMT ", species %" PetscInt_FMT "\n", handle.is_converged_host(blkID) ? "converged" : "diverged", handle.get_iteration_host(blkID), blkID % batch_sz, blkID / batch_sz));
        else PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), " Linear solve %s in %d iterations, block %" PetscInt_FMT "\n", handle.is_converged_host(blkID) ? "converged" : "diverged", handle.get_iteration_host(blkID), (PetscInt)blkID));
      } else if (jac->batch_target == -1 && handle.get_iteration_host(blkID) >= count) {
        jac->max_nits = count = handle.get_iteration_host(blkID);
        mbid          = blkID;
      }
      if (!handle.is_converged_host(blkID)) PetscCall(PetscPrintf(PETSC_COMM_SELF, "ERROR species %d, batch %d did not converge with %d iterations\n", (int)(blkID / batch_sz), (int)(blkID % batch_sz), handle.get_iteration_host(blkID)));
    }
  }
  if (jac->batch_target == -1 && jac->reason) {
    if (batch_sz != 1)
      PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), " Linear solve %s in %d iterations, batch %" PetscInt_FMT ", species %" PetscInt_FMT "\n", handle.is_converged_host(mbid) ? "converged" : "diverged", jac->max_nits, mbid % batch_sz, mbid / batch_sz));
    else PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), " Linear solve %s in %d iterations, block %" PetscInt_FMT "\n", handle.is_converged_host(mbid) ? "converged" : "diverged", jac->max_nits, mbid));
  }
#if defined(PETSC_HAVE_CUDA)
  nvtxRangePop();
#endif

  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif