#pragma once

// ${generated_comment}

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
  meaning the file will need to be re-compiled every time an operator \
  is changed or added. Consider if your change would be better placed in \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
  and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>

${NativeFunctions_includes}

${NativeFunctions_declarations}
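// A minimal sketch of the alternative suggested by the second #error above
// (the operator name "empty" is only an illustrative choice): a translation
// unit that needs a single operator's native signatures can include the
// corresponding per-operator header instead of this umbrella header, e.g.
//
//   #include <ATen/ops/empty_native.h>
//
// which avoids re-compiling that file whenever any unrelated operator is
// changed or added in native_functions.yaml.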