xref: /aosp_15_r20/external/pytorch/aten/src/ATen/templates/Operators.h (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
1 #pragma once
2 
3 // ${generated_comment}
4 
5 #ifdef TORCH_ASSERT_NO_OPERATORS
6 #error This change adds a dependency on native_functions.yaml,             \
7   meaning the file will need to be re-compiled every time an operator      \
8   is changed or added. Consider if your change would be better placed in   \
9   another file, or if a more specific header might achieve the same goal.  \
10   See NOTE: [Tensor vs. TensorBase]
11 #endif
12 
13 #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
14 #error This change adds a dependency on all pytorch operators, meaning the     \
15   file will need to be re-compiled every time an operator is changed or added. \
16   Consider including a specific operator from <ATen/ops/{my_operator}_ops.h>   \
17   and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
18 #endif
19 
20 #include <c10/core/SymInt.h>
21 #include <c10/core/SymIntArrayRef.h>
22 #include <c10/core/Scalar.h>
23 #include <c10/core/TensorOptions.h>
24 #include <c10/core/QScheme.h>
25 #include <c10/util/OptionalArrayRef.h>
26 #include <tuple>
27 #include <vector>
28 
29 ${Operators_includes}
30 
31 // Extension writers: do you write wrapper functions? Are you frustrated with
32 // resolving overloads of operators? Are you frustrated with dealing with
33 // pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
34 // further, this is the utility for you.
35 //
36 // Given an operator schema: aten::op.overload(...
37 //
38 // Use ATEN_FN2(op, overload) to get a *function* version of the operator
39 // that is guaranteed to not be overloaded. This means that you can safely
40 // decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
41 //
42 // Given an operator schema without an overload name: aten::op(...
43 //
44 // Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
45 //
46 // There is some interesting behavior for out= operations.
47 // ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
48 // that is, the order of arguments is exactly what it looks like in the schema.
49 
50 #define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call  // token-pastes "op_name_overload" to reach the generated _ops struct; ::call is its non-overloaded free-function entry point
51 #define ATEN_FN(op_name) at::_ops::op_name::call  // overload-free variant for schemas with no overload name
52 
53 // Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
54 // metadata about a given aten operator.
55 // Notable data on the class includes:
56 // - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
57 // - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
58 // - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
59 // - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
60 
61 #define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload  // names the generated metadata class itself (exposes ::name, ::overload_name, ::schema, ::schema_str)
62 #define ATEN_OP(op_name) at::_ops::op_name  // same, for operators without an overload name
63 
64 // WARNING: Please do not call any of the ops in the _ops namespace directly.
65 // Use the ATEN_FN macros. We do not guarantee stability of the naming
66 // scheme for the functions in at::_ops
67 
68 // See Note [The ATen Operators API] for details of the at::_ops namespace
69 
70 namespace at {
71 namespace _ops {
72 ${Operators_declarations}  // placeholder expanded by the code generator with one declaration block per operator overload — do not edit the generated output by hand
73 } // namespace _ops
74 } // namespace at
75