#pragma once

// WARNING: Be careful when adding new includes here. This header will be used
// in model.so, and should not refer to any aten/c10 headers except the stable
// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule
// applies to other files under torch/csrc/inductor/aoti_runtime/.

#include <stdexcept>
#include <string>

#ifdef USE_CUDA

// FIXME: Currently, the CPU and CUDA backends are mutually exclusive.
// This is a temporary workaround; we need a better way to support
// multiple devices.

#include <cuda.h>
#include <cuda_runtime_api.h>

#define AOTI_RUNTIME_DEVICE_CHECK(EXPR)         \
  do {                                          \
    const cudaError_t code = EXPR;              \
    if (code != cudaSuccess) {                  \
      throw std::runtime_error(                 \
          std::string("CUDA error: ") +         \
          cudaGetErrorString(code));            \
    }                                           \
  } while (0)
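
// Example usage (an illustrative sketch, not taken from this header; `buf`
// and the byte counts are hypothetical): wrap any expression returning
// cudaError_t so a failure surfaces as a C++ exception.
//
//   void* buf = nullptr;
//   AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc(&buf, 1024));
//   AOTI_RUNTIME_DEVICE_CHECK(cudaMemset(buf, 0, 1024));
//   AOTI_RUNTIME_DEVICE_CHECK(cudaFree(buf));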

namespace torch::aot_inductor {

// Stream handle type used by AOTInductor-generated code; on CUDA builds it
// aliases the native CUDA stream type.
using DeviceStreamType = cudaStream_t;

} // namespace torch::aot_inductor

#else // !USE_CUDA

#define AOTI_RUNTIME_DEVICE_CHECK(EXPR)              \
  do {                                               \
    if (!(EXPR)) {                                   \
      throw std::runtime_error("CPU runtime error"); \
    }                                                \
  } while (0)
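
// Example usage (an illustrative sketch, not taken from this header; `buf`
// is hypothetical, and std::malloc would need <cstdlib>): the CPU variant
// checks a boolean expression rather than a cudaError_t.
//
//   void* buf = std::malloc(1024);
//   AOTI_RUNTIME_DEVICE_CHECK(buf != nullptr);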

namespace torch::aot_inductor {

// Placeholder stream type for non-CUDA builds; there is no real device
// stream on CPU.
using DeviceStreamType = void*;

} // namespace torch::aot_inductor

#endif // USE_CUDA