# aten/src/ATen/native/ts_native_functions.yaml
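# Backend config consumed by torchgen's lazy codegen (torchgen/gen_lazy_tensor.py)
# to generate the TorchScript-based lazy tensor backend. `backend` names the
# dispatch key the generated kernels register under, and `cpp_namespace` is the
# C++ namespace the generated code lives in.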
backend: Lazy
cpp_namespace: torch::lazy
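# Ops under `full_codegen` get their IR node classes and their lazy kernels
# generated entirely by codegen; only the matching compute_shape_* shape-inference
# functions are written by hand (in torch/csrc/lazy/core/shape_inference.cpp).
# Entries name schemas from native_functions.yaml, e.g. `add.Tensor` refers to
#   add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor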
full_codegen:
  - _adaptive_avg_pool2d
  - _adaptive_avg_pool2d_backward
  - _log_softmax
  - _log_softmax_backward_data
  - _softmax
  - _softmax_backward_data
  - abs
  - add.Tensor
  - addcdiv
  - addcmul
  - addmm
  - arange.start_out
  - all
  - any
  - avg_pool2d
  - avg_pool2d_backward
  - baddbmm
  - bernoulli
  - bernoulli.p
  - binary_cross_entropy
  - binary_cross_entropy_backward
  - bitwise_and.Tensor
  - bitwise_or.Tensor
  - bmm
  - cat
  - clamp
  - clamp_min
  - constant_pad_nd
  - convolution
  - convolution_backward
  - cos
  - cumsum
  - div.Tensor
  - div.Tensor_mode
  - elu
  - elu_backward
  - embedding
  - embedding_dense_backward
  - eq.Scalar
  - eq.Tensor
  - exp
  - flip
  - floor
  - frac
  - gather
  - ge.Scalar
  - ge.Tensor
  - gelu
  - gelu_backward
  - glu
  - glu_backward
  - glu_jvp
  - grid_sampler_2d
  - grid_sampler_2d_backward
  - gt.Scalar
  - gt.Tensor
  - hardsigmoid
  - index_select
  - le.Scalar
  - le.Tensor
  - leaky_relu
  - leaky_relu_backward
  - log
  - log2
  - logdet
  - log_sigmoid_backward
  - log_sigmoid_forward
  - lt.Scalar
  - lt.Tensor
  - masked_fill.Scalar
  - masked_fill.Tensor
  - max
  - max.dim
  - max_pool2d_with_indices
  - max_pool2d_with_indices_backward
  - maximum
  - mean
  - mean.dim
  - min
  - minimum
  - mm
  - mul.Tensor
  - mv
  - native_batch_norm
  - native_batch_norm_backward
  - native_dropout
  - native_dropout_backward
  - native_layer_norm
  - native_layer_norm_backward
  - ne.Scalar
  - ne.Tensor
  - neg
  - nll_loss_backward
  - nll_loss_forward
  - nll_loss2d_backward
  - nll_loss2d_forward
  - nonzero
  - norm.ScalarOpt_dim
  - pow.Tensor_Scalar
  - pow.Tensor_Tensor
  - random
  - random.from
  - random.to
  - reciprocal
  - relu
  - remainder.Tensor
  - repeat
  - rsqrt
  - scatter_add
  - sgn
  - sigmoid
  - sigmoid_backward
  - silu
  - smooth_l1_loss
  - smooth_l1_loss_backward
  - softplus
  - softplus_backward
  - sort
  - sqrt
  - stack
  - std
  - std.dim
  - std.correction
  - sub.Tensor
  - sum
  - sum.dim_IntList
  - tanh
  - tanh_backward
  - threshold
  - threshold_backward
  - topk
  - trace
  - tril
  - triu
  - trunc
  - upsample_bilinear2d
  - upsample_bilinear2d_backward
  - upsample_nearest2d
  - upsample_nearest2d_backward
  - zero
  - alias_copy
  - as_strided_copy
  - diagonal_copy
  - expand_copy
  - permute_copy
  - _reshape_alias_copy
  - select_copy.int
  - detach_copy
  - slice_copy.Tensor
  # Not implemented yet because LTC codegen doesn't currently work
  # for ops that return lists of tensors.
  #- split_copy.Tensor
  #- split_with_sizes_copy
  #- unbind_copy.int
  - squeeze_copy
  - squeeze_copy.dim
  - squeeze_copy.dims
  - t_copy
  - transpose_copy.int
  - unsqueeze_copy
  - view_copy
  - view_copy.dtype
  - unfold_copy
  - select_scatter
  - slice_scatter
  - diagonal_scatter
  - as_strided_scatter
  # random ops
  - normal_functional
  - uniform
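# Ops under `ir_gen` only have their IR node classes generated; the kernels
# that build those nodes are written by hand.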
ir_gen:
  - selu
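# Ops under `supported` are implemented by hand in the backend
# (torch/csrc/lazy/ts_backend/ts_native_functions.cpp); codegen only emits
# their declarations and dispatch registrations.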
supported:
  - clone
  - _copy_from
  - _copy_from_and_resize
  - empty.memory_format
  - empty_strided
  - fill_.Scalar
  - max_pool3d_with_indices
  - max_pool3d_with_indices_backward
  - _to_copy
  - _unsafe_view
  - lift
  - lift_fresh
  # Below are all operators that are "composite" in core,
  # but require us to explicitly re-enable functionalization in order to use them.
  # Why? These operators are all CompositeExplicitAutograd, which means that they
  # run after functionalization, but their implementations call view operators
  # (which we need to functionalize away).
  - block_diag
  - diag_embed
  - diagonal_backward
  - slice_backward
  - new_empty_strided
  - narrow_copy
  - pixel_shuffle
  - pixel_unshuffle
  - select_backward
  - _trilinear
  - linalg_pinv.atol_rtol_tensor
  - logsumexp.out
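# Ops under `symint` use the SymInt variants of their schemas in the generated
# code, so sizes and strides stay symbolic rather than being specialized to
# concrete ints.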
symint:
  - empty.memory_format
  - expand_copy
  - narrow_copy
  - view_copy
  - as_strided_copy
  - as_strided_scatter
  - diagonal_backward
  - slice_backward
  - slice_copy.Tensor
  - slice_scatter
  - empty_strided
  - new_empty_strided
  - _reshape_alias_copy
  - select_backward
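# Ops under `autograd` are registered under the backend's autograd dispatch
# key (AutogradLazy), so the backend supplies its own autograd-aware
# implementations instead of relying on the derivative formulas in core.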
autograd:
  - max_pool3d
  - native_group_norm

# Ops that don't have native schema definitions and are dispatched within Lazy Tensor Core
non_native:
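  # Each entry gives a schema-style signature. `opkind` overrides the default
  # IR OpKind for the generated node, and `properties` sets LazyIrProperties
  # flags that adjust what codegen emits for it.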
  - func: scalar(Scalar value, ScalarType type) -> Tensor
    opkind: at::prim::Constant
    properties:
      - ShapeCompute
      - TreatScalarsAsConstants
      - CanBeReusedDeclOnly
  # Even though we have removed all the other view ops in favor of their *_copy
  # versions, expand is still kept because it's used in copy_.
  - func: expand(Tensor input, int[] size, bool is_scalar_expand) -> Tensor
  - func: cast(Tensor input, ScalarType dtype, ScalarType? stype) -> Tensor
    opkind: ltc_cast
    properties:
      - ShapeCompute
240