Actual source code: cupmdevice.cxx
#include <petsc/private/cpp/memory.hpp>

#include "cupmdevice.hpp"

#include <algorithm>
#include <climits>  // INT_MAX
#include <csetjmp>  // for the CUDA-aware MPI check
#include <csignal>  // SIGSEGV
#include <cstdlib>  // getenv(), strtol()
#include <iterator>
#include <type_traits>

namespace Petsc
{

namespace device
{

namespace cupm
{

// internal "impls" class for CUPMDevice. Each instance represents a single cupm device
template <DeviceType T>
class Device<T>::DeviceInternal {
  const int        id_;
  bool             devInitialized_ = false;
  cupmDeviceProp_t dprop_{}; // cudaDeviceProp appears to be an actual struct, i.e. you can't
                             // initialize it with nullptr or NULL (I've tried)

  static PetscErrorCode CUPMAwareMPI_(bool *) noexcept;

public:
  // default constructor
  explicit constexpr DeviceInternal(int dev) noexcept : id_(dev) { }

  // gather all relevant information for a particular device; a cupmDeviceProp_t is
  // usually sufficient here
  PetscErrorCode initialize() noexcept;
  PetscErrorCode configure() noexcept;
  PetscErrorCode view(PetscViewer) const noexcept;
  PetscErrorCode getattribute(PetscDeviceAttribute, void *) const noexcept;

  PETSC_NODISCARD auto id() const -> decltype(id_) { return id_; }
  PETSC_NODISCARD auto initialized() const -> decltype(devInitialized_) { return devInitialized_; }
  PETSC_NODISCARD auto prop() const -> const decltype(dprop_) & { return dprop_; }
};

// the goal here is simply to get the cupm backend to create its context, not to do any type of
// modification of it, or create objects (since these may be affected by subsequent
// configuration changes)
template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::initialize() noexcept
{
  PetscFunctionBegin;
  if (initialized()) PetscFunctionReturn(PETSC_SUCCESS);
  devInitialized_ = true;
  // need to do this BEFORE the device has been set, although if the user has already done
  // this then we just ignore it
  if (cupmSetDeviceFlags(cupmDeviceMapHost) == cupmErrorSetOnActiveProcess) {
    // reset the error if it was cupmErrorSetOnActiveProcess
    const auto PETSC_UNUSED unused = cupmGetLastError();
  } else PetscCallCUPM(cupmGetLastError());
  // cuda 5.0+ will create a context when cupmSetDevice is called
  if (cupmSetDevice(id()) != cupmErrorDeviceAlreadyInUse) PetscCallCUPM(cupmGetLastError());
  // and in case it doesn't, explicitly call init here
  PetscCallCUPM(cupmInit(0));
  // use_gpu_aware_mpi is a global flag set during PETSc options processing, so it is defined
  // and set by this point. Each device must make this check, since MPI might not be aware of
  // all of them
  if (use_gpu_aware_mpi) {
    bool aware;

    PetscCall(CUPMAwareMPI_(&aware));
    // For Open MPI, we could do a compile-time check with
    // "defined(PETSC_HAVE_OPENMPI) && defined(MPIX_CUDA_AWARE_SUPPORT) &&
    // MPIX_CUDA_AWARE_SUPPORT" to see if it is CUDA-aware. However, recent versions of IBM
    // Spectrum MPI (e.g., 10.3.1) on Summit meet the above conditions, but one has to use
    // jsrun --smpiargs=-gpu to really enable GPU-aware MPI. So we do the check at runtime
    // with code that works only with GPU-aware MPI.
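    // For reference, that compile-time check would look roughly like the sketch below
    // (MPIX_CUDA_AWARE_SUPPORT comes from Open MPI's <mpi-ext.h>; as noted above, this is
    // deliberately NOT what we do here):
    //
    //   #if defined(PETSC_HAVE_OPENMPI) && defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
    //     aware = true; // trust the build-time flag instead of probing at runtime
    //   #endif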
    if (PetscUnlikely(!aware)) {
      PetscCall((*PetscErrorPrintf)("PETSc is configured with GPU support, but your MPI is not GPU-aware. For better performance, please use a GPU-aware MPI.\n"));
      PetscCall((*PetscErrorPrintf)("If you do not care, add the option -use_gpu_aware_mpi 0. To not see the message again, add the option to your .petscrc, OR add it to the env var PETSC_OPTIONS.\n"));
      PetscCall((*PetscErrorPrintf)("If you do care, for IBM Spectrum MPI on OLCF Summit, you may need jsrun --smpiargs=-gpu.\n"));
      PetscCall((*PetscErrorPrintf)("For Open MPI, you need to configure it with --with-cuda (https://www.open-mpi.org/faq/?category=buildcuda)\n"));
      PetscCall((*PetscErrorPrintf)("For MVAPICH2-GDR, you need to set MV2_USE_CUDA=1 (http://mvapich.cse.ohio-state.edu/userguide/gdr/)\n"));
      PetscCall((*PetscErrorPrintf)("For Cray-MPICH, you need to set MPICH_GPU_SUPPORT_ENABLED=1 (man mpi to see the manual of cray-mpich)\n"));
      PETSCABORT(PETSC_COMM_SELF, PETSC_ERR_LIB);
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::configure() noexcept
{
  PetscFunctionBegin;
  PetscAssert(initialized(), PETSC_COMM_SELF, PETSC_ERR_COR, "Device %d being configured before it was initialized", id());
  // why on EARTH nvidia insists on making otherwise informational states into
  // fully-fledged error codes is beyond me. Why couldn't a pointer-to-bool argument have
  // sufficed?!?!?!
  if (cupmSetDevice(id_) != cupmErrorDeviceAlreadyInUse) PetscCallCUPM(cupmGetLastError());
  // need to update the device properties
  PetscCallCUPM(cupmGetDeviceProperties(&dprop_, id_));
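  // encode compute capability major.minor as a two-digit arch number, e.g. a device with
  // compute capability 8.0 gives PetscDeviceCUPMRuntimeArch = 80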
  PetscDeviceCUPMRuntimeArch = dprop_.major * 10 + dprop_.minor;
  PetscCall(PetscInfo(nullptr, "Configured device %d\n", id_));
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::view(PetscViewer viewer) const noexcept
{
  PetscBool isascii;

  PetscFunctionBegin;
  PetscAssert(initialized(), PETSC_COMM_SELF, PETSC_ERR_COR, "Device %d being viewed before it was initialized or configured", id());
  // we don't print device-specific info in CI-mode
  if (PetscUnlikely(PetscCIEnabled)) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCall(PetscObjectTypeCompare(PetscObjectCast(viewer), PETSCVIEWERASCII, &isascii));
  if (isascii) {
    MPI_Comm    comm;
    PetscMPIInt rank;
    PetscViewer sviewer;

    int clock, memclock;

    PetscCallCUPM(cupmDeviceGetAttribute(&clock, cupmDevAttrClockRate, id_));
    PetscCallCUPM(cupmDeviceGetAttribute(&memclock, cupmDevAttrMemoryClockRate, id_));

    PetscCall(PetscObjectGetComm(PetscObjectCast(viewer), &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "[%d] name: %s\n", rank, dprop_.name));
    PetscCall(PetscViewerASCIIPushTab(sviewer));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Compute capability: %d.%d\n", dprop_.major, dprop_.minor));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Multiprocessor Count: %d\n", dprop_.multiProcessorCount));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Maximum Grid Dimensions: %d x %d x %d\n", dprop_.maxGridSize[0], dprop_.maxGridSize[1], dprop_.maxGridSize[2]));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Maximum Block Dimensions: %d x %d x %d\n", dprop_.maxThreadsDim[0], dprop_.maxThreadsDim[1], dprop_.maxThreadsDim[2]));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Maximum Threads Per Block: %d\n", dprop_.maxThreadsPerBlock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Warp Size: %d\n", dprop_.warpSize));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Total Global Memory (bytes): %zu\n", dprop_.totalGlobalMem));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Total Constant Memory (bytes): %zu\n", dprop_.totalConstMem));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Shared Memory Per Block (bytes): %zu\n", dprop_.sharedMemPerBlock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Multiprocessor Clock Rate (kHz): %d\n", clock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Memory Clock Rate (kHz): %d\n", memclock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Memory Bus Width (bits): %d\n", dprop_.memoryBusWidth));
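    // peak bandwidth = 2 transfers per clock (double data rate) * memory clock (kHz)
    //                * bus width (bytes), where dividing by 1e6 converts kHz * bytes to GB/s,
    // e.g. a 877000 kHz memory clock on a 4096-bit bus gives 2 * 877000 * 512 / 1e6 ~ 898 GB/s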
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Peak Memory Bandwidth (GB/s): %f\n", 2.0 * memclock * (dprop_.memoryBusWidth / 8) / 1.0e6));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Can map host memory: %s\n", dprop_.canMapHostMemory ? "PETSC_TRUE" : "PETSC_FALSE"));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Can execute multiple kernels concurrently: %s\n", dprop_.concurrentKernels ? "PETSC_TRUE" : "PETSC_FALSE"));
    PetscCall(PetscViewerASCIIPopTab(sviewer));
    PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::getattribute(PetscDeviceAttribute attr, void *value) const noexcept
{
  PetscFunctionBegin;
  PetscAssert(initialized(), PETSC_COMM_SELF, PETSC_ERR_COR, "Device %d was not initialized", id());
  switch (attr) {
  case PETSC_DEVICE_ATTR_SIZE_T_SHARED_MEM_PER_BLOCK:
    *static_cast<std::size_t *>(value) = prop().sharedMemPerBlock;
    break;
  case PETSC_DEVICE_ATTR_MAX:
    break;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static std::jmp_buf cupmMPIAwareJumpBuffer;
static bool         cupmMPIAwareJumpBufferSet;
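
// this function exists solely to reference the statics above so the compiler does not warn
// that they are "not needed and will not be emitted" in translation units that never
// instantiate CUPMAwareMPI_(); it aborts immediately if ever actually called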
// godspeed to anyone that attempts to call this function
void SilenceVariableIsNotNeededAndWillNotBeEmittedWarning_ThisFunctionShouldNeverBeCalled()
{
  PETSCABORT(MPI_COMM_NULL, (PetscErrorCode)INT_MAX);
  if (cupmMPIAwareJumpBufferSet) (void)cupmMPIAwareJumpBuffer;
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::CUPMAwareMPI_(bool *awareness) noexcept
{
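  // strategy: copy a small buffer to the device, then hand the *device* pointer straight to
  // MPI_Allreduce. A GPU-aware MPI can read device memory, so the reduction succeeds; a
  // non-GPU-aware MPI dereferences the device pointer on the host and raises SIGSEGV, which
  // the temporary signal handler below converts into a longjmp back to the setjmp site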
  constexpr int hbuf[]            = {1, 0};
  int          *dbuf              = nullptr;
  const auto    cupmSignalHandler = [](int signal, void *ptr) -> PetscErrorCode {
    if ((signal == SIGSEGV) && cupmMPIAwareJumpBufferSet) std::longjmp(cupmMPIAwareJumpBuffer, 1);
    return PetscSignalHandlerDefault(signal, ptr);
  };

  PetscFunctionBegin;
  *awareness = false;
  PetscCallCUPM(cupmMalloc(reinterpret_cast<void **>(&dbuf), sizeof(hbuf)));
  PetscCallCUPM(cupmMemcpy(dbuf, hbuf, sizeof(hbuf), cupmMemcpyHostToDevice));
  PetscCallCUPM(cupmDeviceSynchronize());
  PetscCall(PetscPushSignalHandler(cupmSignalHandler, nullptr));
  cupmMPIAwareJumpBufferSet = true;
  if (!setjmp(cupmMPIAwareJumpBuffer) && !MPI_Allreduce(dbuf, dbuf + 1, 1, MPI_INT, MPI_SUM, PETSC_COMM_SELF)) *awareness = true;
  cupmMPIAwareJumpBufferSet = false;
  PetscCall(PetscPopSignalHandler());
  PetscCallCUPM(cupmFree(dbuf));
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::finalize_() noexcept
{
  PetscFunctionBegin;
  if (PetscUnlikely(!initialized_)) PetscFunctionReturn(PETSC_SUCCESS);
  for (auto &&device : devices_) device.reset();
  defaultDevice_ = PETSC_CUPM_DEVICE_NONE; // disabled by default
  initialized_   = false;
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PETSC_NODISCARD static PETSC_CONSTEXPR_14 const char *CUPM_VISIBLE_DEVICES() noexcept
{
  switch (T) {
  case DeviceType::CUDA:
    return "CUDA_VISIBLE_DEVICES";
  case DeviceType::HIP:
    return "HIP_VISIBLE_DEVICES";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}

/*
  The default device ID is
    MPI     -- rank % number_local_devices
    PyTorch -- getenv("LOCAL_RANK")
*/
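// e.g. with 4 visible devices, torchrun exporting LOCAL_RANK=3 selects device 3, while a
// plain MPI launch gives rank 5 device 5 % 4 = 1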
template <DeviceType T>
PetscErrorCode Device<T>::initialize(MPI_Comm comm, PetscInt *defaultDeviceId, PetscBool *defaultView, PetscDeviceInitType *defaultInitType) noexcept
{
  auto initId   = std::make_pair(*defaultDeviceId, PETSC_FALSE);
  auto initView = std::make_pair(*defaultView, PETSC_FALSE);
  auto initType = std::make_pair(*defaultInitType, PETSC_FALSE);
  int  ndev     = 0;

  PetscFunctionBegin;
  if (initialized_) PetscFunctionReturn(PETSC_SUCCESS);
  initialized_ = true;
  PetscCall(PetscRegisterFinalize(finalize_));
  PetscCall(base_type::PetscOptionDeviceAll(comm, initType, initId, initView));

  if (initType.first == PETSC_DEVICE_INIT_NONE) {
    initId.first = PETSC_CUPM_DEVICE_NONE;
  } else if (const auto cerr = cupmGetDeviceCount(&ndev)) {
    auto PETSC_UNUSED ignored = cupmGetLastError();

    PetscCheck((initType.first != PETSC_DEVICE_INIT_EAGER) && !initView.first, comm, PETSC_ERR_USER_INPUT, "Cannot eagerly initialize %s, as doing so results in %s error %d (%s) : %s", cupmName(), cupmName(), static_cast<PetscErrorCode>(cerr), cupmGetErrorName(cerr), cupmGetErrorString(cerr));
    // we won't be initializing anything anyway
    initType.first = PETSC_DEVICE_INIT_NONE;
    // save the (negated) error code so init_device_id_() can report it later
    initId.first = -static_cast<decltype(initId.first)>(cerr);
  }

  // check again for init type, since the device count may have changed it
  if (initType.first == PETSC_DEVICE_INIT_NONE) {
    // id < 0 (excluding PETSC_DECIDE) indicates an error has occurred during setup
    if ((initId.first > 0) || (initId.first == PETSC_DECIDE)) initId.first = PETSC_CUPM_DEVICE_NONE;
    // initType overrides initView
    initView.first = PETSC_FALSE;
  } else {
    PetscCall(PetscDeviceCheckDeviceCount_Internal(ndev));
    if (initId.first == PETSC_DECIDE) {
      if (ndev) {
        /* TORCHELASTIC_RUN_ID is used as a proxy to determine if the current process was launched with torchrun */
        const char *pytorch_exists = getenv("TORCHELASTIC_RUN_ID");
        const char *pytorch_rank   = getenv("LOCAL_RANK");

        if (pytorch_exists && pytorch_rank) {
          char *endptr;

          initId.first = (PetscInt)strtol(pytorch_rank, &endptr, 10);
          PetscCheck(initId.first < ndev, PETSC_COMM_SELF, PETSC_ERR_LIB, "PyTorch environment variable LOCAL_RANK %s >= number of devices %d", pytorch_rank, ndev);
        } else {
          PetscMPIInt rank;

          PetscCallMPI(MPI_Comm_rank(comm, &rank));
          initId.first = rank % ndev;
        }
      } else initId.first = 0;
    }
    if (initView.first) initType.first = PETSC_DEVICE_INIT_EAGER;
  }

  static_assert(std::is_same<PetscMPIInt, decltype(defaultDevice_)>::value, "");
  // initId.first is PetscInt, defaultDevice_ is int
  PetscCall(PetscMPIIntCast(initId.first, &defaultDevice_));
  // record the results of the initialization
  *defaultDeviceId = initId.first;
  *defaultView     = initView.first;
  *defaultInitType = initType.first;
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::init_device_id_(PetscInt *inid) const noexcept
{
  const auto id   = *inid == PETSC_DECIDE ? defaultDevice_ : (int)*inid;
  const auto cerr = static_cast<cupmError_t>(-defaultDevice_);
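  // if initialization failed, defaultDevice_ holds the negated cupmError_t saved in
  // initialize(), so negating it again recovers the original error code for the message below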

  PetscFunctionBegin;
  PetscCheck(defaultDevice_ != PETSC_CUPM_DEVICE_NONE, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Trying to retrieve a %s PetscDevice when it has been disabled", cupmName());
  PetscCheck(defaultDevice_ >= 0, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot lazily initialize PetscDevice: %s error %d (%s) : %s", cupmName(), static_cast<PetscErrorCode>(cerr), cupmGetErrorName(cerr), cupmGetErrorString(cerr));
  PetscAssert(static_cast<decltype(devices_.size())>(id) < devices_.size(), PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Only supports %zu devices but trying to get device with id %d", devices_.size(), id);

  if (!devices_[id]) devices_[id] = util::make_unique<DeviceInternal>(id);
  PetscAssert(id == devices_[id]->id(), PETSC_COMM_SELF, PETSC_ERR_PLIB, "Entry %d contains device with mismatching id %d", id, devices_[id]->id());
  PetscCall(devices_[id]->initialize());
  *inid = id;
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::configure_device_(PetscDevice device) noexcept
{
  PetscFunctionBegin;
  PetscCall(devices_[device->deviceId]->configure());
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::view_device_(PetscDevice device, PetscViewer viewer) noexcept
{
  PetscFunctionBegin;
  // this __shouldn't__ reconfigure the device, but configure() emits a PetscInfo message
  // indicating that it is being reconfigured
  PetscCall(devices_[device->deviceId]->configure());
  PetscCall(devices_[device->deviceId]->view(viewer));
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::get_attribute_(PetscInt id, PetscDeviceAttribute attr, void *value) noexcept
{
  PetscFunctionBegin;
  PetscCall(devices_[id]->getattribute(attr, value));
  PetscFunctionReturn(PETSC_SUCCESS);
}

// explicitly instantiate the classes
#if PetscDefined(HAVE_CUDA)
template class Device<DeviceType::CUDA>;
#endif
#if PetscDefined(HAVE_HIP)
template class Device<DeviceType::HIP>;
#endif

} // namespace cupm

} // namespace device

} // namespace Petsc