# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This yaml file contains operators that are also defined by the ATen library.
# For lean mode:
#   - Codegen'd target `executorch_generated_lib` will be reading all the information
#     from this file, including operator schema and kernel metadata.
#   - Selective build target `codegen:executorch_defined_ops` now is selecting all the
#     operators in this file, by dumping all the op names into `selected_operators.yaml`.
#
# See the README.md file in executorch/kernels/portable for a description of the syntax used
# by this file.


# aten ops
- op: _to_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::to_copy_out

- op: _softmax.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::softmax_out

- op: add.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::add_out

- op: bmm.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::bmm_out

- op: cat.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::cat_out

- op: clone.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::clone_out

- op: div.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::div_out

- op: div.out_mode
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::div_out_mode

- op: embedding.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::embedding_out

- op: empty.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::empty_out

- op: expand_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::expand_copy_out

- op: full.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::full_out

- op: gelu.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::gelu_out

- op: hardtanh.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::hardtanh_out

- op: max_pool2d_with_indices.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::max_pool2d_with_indices_out

# NOTE: mean.out is bound to the dim-reducing kernel variant (mean_dim_out),
# matching the original binding in this file.
- op: mean.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::mean_dim_out

- op: mul.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::mul_out

- op: mul.Scalar_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::mul_scalar_out

- op: permute_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::permute_copy_out

- op: rsqrt.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::rsqrt_out

- op: sigmoid.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sigmoid_out

- op: slice_copy.Tensor_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::slice_copy_Tensor_out

- op: split_with_sizes_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::split_with_sizes_copy_out

- op: sub.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::sub_out

- op: view_copy.out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::view_copy_out

- op: where.self_out
  kernels:
    - arg_meta: null
      kernel_name: torch::executor::where_out

# custom ops
- func: cadence::quantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantize_per_tensor_out

- func: cadence::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
  variants: function
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::dequantize_per_tensor_out

- func: cadence::quantized_conv.out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] dilation, int groups, int input_zero_point, Tensor weight_zero_point, Tensor bias_scale, float out_scale, int out_zero_point, Tensor out_multiplier, Tensor out_shift, bool channel_last=False, *, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantized_conv_out

- func: cadence::quantized_layer_norm.out(Tensor input, Tensor in_scale, Tensor in_zero_point, int[] normalized_shape, Tensor weight, Tensor bias, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantized_layer_norm_out

- func: cadence::quantized_layer_norm.per_tensor_out(Tensor input, float in_scale, int in_zero_point, int[] normalized_shape, Tensor weight, Tensor bias, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantized_layer_norm_per_tensor_out

- func: cadence::quantized_linear.out(Tensor src, Tensor weight, Tensor bias, int src_zero_point, Tensor weight_zero_point, Tensor out_multiplier, Tensor out_shift, int out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantized_linear_out

- func: cadence::quantized_relu.out(Tensor X, Tensor X_zero_point, int out_zero_point, Tensor out_multiplier, Tensor out_shift, *, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantized_relu_out

- func: cadence::quantized_matmul.out(Tensor X, int X_zero_point, Tensor Y, int Y_zero_point, Tensor? bias, int out_multiplier, int out_shift, int out_zero_point, bool transposed, *, Tensor(a!) out) -> Tensor(a!)
  kernels:
    - arg_meta: null
      kernel_name: impl::reference::quantized_matmul_out