// required for old g++ to compile PRId64 macros, see
// https://github.com/pytorch/pytorch/issues/3571
// for context
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

// an external backend might generate files within its code tree
// and check all the source files within the tree with clang-format,
// so disable it here, since the backend might use a different config.
// clang-format off

// NOTE: This condition is true for all PyTorch internal libraries; it
//       just excludes external projects such as torch_xla, which
//       re-use some of the PyTorch codegen machinery.
#if defined(CAFFE2_BUILD_MAIN_LIB)        || \
    defined(TORCH_CUDA_BUILD_MAIN_LIB)    || \
    defined(TORCH_HIP_BUILD_MAIN_LIB)     || \
    defined(TORCH_CUDA_CU_BUILD_MAIN_LIB) || \
    defined(TORCH_CUDA_CPP_BUILD_MAIN_LIB)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#endif

// ${generated_comment}

#include <c10/core/TensorImpl.h>
#include <c10/core/Allocator.h>
#include <ATen/DeviceGuard.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/Dispatch.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <optional>
#include <ATen/Tensor.h>
#include <ATen/native/Resize.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <utility>

#include <ATen/Config.h>
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>
$extra_cuda_headers
$external_backend_headers
$dispatch_headers
$ops_headers

// See template file RegisterDispatchDefinitions.ini
$dispatch_definitions
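
// For orientation when reading the generated output: $dispatch_definitions
// typically expands to the per-operator wrapper functions for one dispatch
// key, followed by a TORCH_LIBRARY_IMPL block that registers them. A rough,
// illustrative sketch only (the operator and wrapper names below are
// examples, not the exact generated code):
//
//   namespace at {
//   namespace {
//   // hypothetical generated wrapper for add.Tensor on the CPU key
//   at::Tensor wrapper_CPU_add_Tensor(
//       const at::Tensor& self,
//       const at::Tensor& other,
//       const at::Scalar& alpha) {
//     return at::native::add(self, other, alpha);
//   }
//   } // anonymous namespace
//
//   TORCH_LIBRARY_IMPL(aten, CPU, m) {
//     m.impl("add.Tensor", TORCH_FN(wrapper_CPU_add_Tensor));
//   }
//   } // namespace at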