xref: /aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/lite/ir/tfl_ops.td (revision b6fb3261f9314811a0f4371741dbb8839866f948)
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15
// LINT.IfChange
// This is the operation definition file for TensorFlow Lite.

#ifndef TFL_OPS
#define TFL_OPS

include "mlir/IR/FunctionInterfaces.td"
include "mlir/IR/OpBase.td"
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Dialect/Quant/QuantOpsBase.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "tensorflow/compiler/mlir/lite/ir/tfl_op_interfaces.td"
include "tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td"
include "tensorflow/compiler/mlir/lite/quantization/quantization.td"
include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td"
33
//===----------------------------------------------------------------------===//
// TFLite dialect string type - uses the TF string type as implementation
//===----------------------------------------------------------------------===//
def TFL_Str : Type<CPred<"$_self.isa<mlir::TF::StringType>()">,
                  "TFLite string type">,
             BuildableType<"getType<mlir::TF::StringType>()">;

//===----------------------------------------------------------------------===//
// TFLite dialect quint8 type - uses the TF quint8 type as implementation
//===----------------------------------------------------------------------===//
def TFL_Quint8 : Type<CPred<"$_self.isa<mlir::TF::Quint8Type>()">,
                    "TFLite quint8 type">,
              BuildableType<"getType<mlir::TF::Quint8Type>()">;

//===----------------------------------------------------------------------===//
// Type that represents control dependencies
//===----------------------------------------------------------------------===//
// Wraps the dialect's ControlType so it can be used as an ODS operand/result
// type for modeling execution-order (non-data) edges.
def TFL_Control: Type<CPred<"$_self.isa<ControlType>()">, "control">,
                 BuildableType<"$_builder.getType<ControlType>()">;
53
54
//===----------------------------------------------------------------------===//
// TensorType attribute definitions.
//===----------------------------------------------------------------------===//
// A type attribute containing the TensorType.
def TensorTypeAttr : TypeAttrBase<"TensorType", "Tensor type attribute">;

//===----------------------------------------------------------------------===//
// Derived shape attribute class.
//===----------------------------------------------------------------------===//
// Derived attribute (computed from the op, not stored) whose C++ value type
// is a shape (ArrayRef<int64_t>).
class DerivedShapeAttr<code body> : DerivedAttr<"ArrayRef<int64_t>", body>;
// Derived attribute yielding a tflite::TensorType; `convert` is the snippet
// used to materialize it as an MLIR attribute.
class DerivedTFLiteTypeAttr<code body, code convert> :
  DerivedAttr<"tflite::TensorType", body, convert>;
67
// TFL Runtime op trait predicate.
// Records a predicate checked by the TFLite runtime verification pass (via
// the generated TFLRuntimeOpTrait), together with a human-readable
// description used in diagnostics.
class TFL_RuntimePredOpTrait<string desc, Pred pred> :
    GenInternalOpTrait<"TFLRuntimeOpTrait"> {
  Pred tflRuntimePredicate = pred;
  string tflRuntimeDescription = desc;
}

// Runtime trait: the operands at `indices` either all share a shape or have
// shapes broadcastable to each other within `max_bcast_rank`; delegates to
// the C++ helper TFL::VerifyOperandsHaveSameShapesOrBroadcastableShape.
class TFL_OperandsHaveSameShapesOrBroadcastableShape<
    list<int> indices, int max_bcast_rank> :
  TFL_RuntimePredOpTrait<"operands do not have the same shape or "
      "broadcastable shapes within the rank " # max_bcast_rank,
    CPred<"TFL::VerifyOperandsHaveSameShapesOrBroadcastableShape("
            "$_op, llvm::ArrayRef<unsigned>({" # !interleave(indices, ", ") #
            "}), " # max_bcast_rank # ")">>;
82
// These additional types/type constraints here are used to decouple the ops
// from runtime support for the ops. Prefer to use these types when defining
// new TF_Ops for uniformity.

// TFL Runtime type predicate.
// Carries the predicate/description of the types the TFLite runtime actually
// supports, kept separate from the (typically looser) constraint used for the
// op definition itself.
class TFL_RuntimeType<TypeConstraint t> {
  Pred tflRuntimeTypePredicate = t.predicate;
  string tflRuntimeTypeDescription = t.summary;
}

// AnyTypeOf over `allowedOpTypes` for the op definition, paired with the
// runtime-supported `allowedRuntimeTypes`.
class TFL_AnyTypeOf<list<Type> allowedRuntimeTypes, string description = "",
                    list<Type> allowedOpTypes = [AnyType]> :
  AnyTypeOf<allowedOpTypes, description>,
  TFL_RuntimeType<AnyTypeOf<allowedRuntimeTypes, description>>;

// Tensor constraint for op definitions: the op side defaults to AnyType
// elements while the runtime-supported element types are recorded separately.
class TFL_TensorOf<list<Type> allowedRuntimeTypes,
                   list<Type> allowedOpTypes = [AnyType]> :
  TensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>> {
  // Set the summary equal to that representing the runtime types.
  let summary = TensorOf<allowedRuntimeTypes>.summary;
}

// Like TFL_TensorOf but also accepts NoneType, for optional operands.
class TFL_TensorOfOrNone<list<Type> allowedRuntimeTypes, string description = "",
                         list<Type> allowedOpTypes = [AnyType]> :
  AnyTypeOf<[TFL_TensorOf<allowedOpTypes>, NoneType], description>,
  TFL_RuntimeType<AnyTypeOf<[TFL_TensorOf<allowedRuntimeTypes>, NoneType]>>;

// Variadic version of TFL_TensorOf.
class TFL_VariadicTensorOf<list<Type> allowedRuntimeTypes,
                   list<Type> allowedOpTypes = [AnyType]> :
  Variadic<TensorOf<allowedOpTypes>>,
  TFL_RuntimeType<Variadic<TensorOf<allowedRuntimeTypes>>>;
114
def TFL_Int32Or64 : SignlessIntOfWidths<[32, 64]>;

// Common tensor-type shorthands used throughout the op definitions below.
def TFL_BoolTensor : TFL_TensorOf<[I1]>;
def TFL_FpTensor : TFL_TensorOf<[F32]>;
def TFL_I32OrI64Tensor : TFL_TensorOf<[TFL_Int32Or64]>;
def TFL_I32Tensor : TFL_TensorOf<[I32]>;
def TFL_I64Tensor : TFL_TensorOf<[I64]>;
def TFL_Complex64Tensor : TFL_TensorOf<[Complex<F<32>>]>;
def TFL_ResourceTensor : TFL_TensorOf<[TF_Resource]>;

// TODO(jpienaar): Expand to all int types.
def TFL_IntTensor : TypeAlias<TFL_I32Tensor, "tensor of any integer type">;

// Fixed-rank tensor constraints. Note the runtime side records only the
// element types (TensorOf), not the rank.
class TFL_0DTensorOf<list<Type> allowedRuntimeTypes,
                     list<Type> allowedOpTypes = [AnyType]> :
  0DTensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;
class TFL_1DTensorOf<list<Type> allowedRuntimeTypes,
                     list<Type> allowedOpTypes = [AnyType]> :
  1DTensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;
class TFL_2DTensorOf<list<Type> allowedRuntimeTypes,
                     list<Type> allowedOpTypes = [AnyType]> :
  2DTensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;

// 1-D tensor or NoneType (optional operand).
// NOTE(review): the op-side constraint is the rank-unconstrained TensorOf, so
// only the runtime side records 1-D-ness — confirm this looseness is
// intentional before tightening.
class TFL_1DTensorOfOrNone<list<Type> allowedRuntimeTypes, string description = "",
                         list<Type> allowedOpTypes = [AnyType]> :
  AnyTypeOf<[TensorOf<allowedOpTypes>, NoneType], description>,
  TFL_RuntimeType<AnyTypeOf<[TFL_1DTensorOf<allowedRuntimeTypes>, NoneType]>>;

// This is used to represent the type of "ref tensors" or tensors that are
// used as variables to track state.
def TFL_StatefulTensor : TypeAlias<AnyTensor, "stateful tensor">;
146
//===----------------------------------------------------------------------===//
// Rank/Shape helpers.
//===----------------------------------------------------------------------===//

// Returns true if operand i is of NoneType.
class TFL_OperandIsNoneType<int i> :
  CPred<"$_op.getOperand(" # i # ").getType().isa<NoneType>()">;

// Returns true if the n-th operand has an unranked tensor type.
class TFL_OperandIsUnrankedPred<int n> :
  CPred<"$_op.getOperand(" # n # ").getType().isa<UnrankedTensorType>()">;

// TODO: Some of these could be generalized and/or moved to more general
// location.
// Returns true if the n-th operand has unknown rank or has rank m.
class TFL_OperandHasRank<int n, int m> :
  PredOpTrait<"operand " # n # " is " # m # "-D",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() == " # m>]>>;

// Returns true if the n-th operand is ranked and has rank dim.
// Unlike TFL_OperandHasRank, an unranked operand fails this predicate.
class TFL_OperandHasKnownRank<int n, int dim> : And<[
  CPred<"$_op.getOperand(" # n # ").getType().isa<RankedTensorType>()">,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() == "
    # dim>]>;

// True if operand n is ranked and has a rank > dim.
class TFL_OperandIsRankedAndHasDimPred<int n, int dim> : And<[
  CPred<"$_op.getOperand(" # n # ").getType().isa<RankedTensorType>()">,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() > "
  # dim>]>;
178
// Returns true if the n-th operand is ranked and has a dimension length = size
// at the rank dim.
// (Fixed a stray space in the generated C++ — previously emitted
// `getShape()[d ] == size` — to match the sibling predicates below.)
class TFL_OperandDimEquals<int n, int dim, int size> : And<[
  TFL_OperandIsRankedAndHasDimPred<n, dim>,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>()"
      ".getShape()[" # dim # "] == " # size>]>;
185
// Returns true if the n-th operand is ranked and has a dimension length <=
// size at the rank dim.
// (Fixed a stray space in the generated C++ — previously emitted
// `getShape()[d ] <= size` — to match the sibling predicates.)
class TFL_OperandDimIsAtMost<int n, int dim, int size> : And<[
  TFL_OperandIsRankedAndHasDimPred<n, dim>,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>()"
      ".getShape()[" # dim # "] <= " # size>]>;
192
// Returns true if the n-th operand has unknown rank or at least rank m.
// NOTE(review): the diagnostic text reads "is m-D" although the check is
// rank >= m; left unchanged because error strings may be matched by tests.
class TFL_OperandHasAtleastRank<int n, int m> :
  PredOpTrait<"operand " # n # " is " # m # "-D",
    Or<[CPred<"$_op.getOperand(" # n # ").getType().isa<UnrankedTensorType>()">,
      CPred<"$_op.getOperand(" # n #
        ").getType().cast<ShapedType>().getRank() >= " # m>]>>;

// Trait: rank of operand x equals the length of dim 0 of operand y.
// Trivially satisfied when either operand is unranked or y has no static
// shape.
class TFL_OperandRankEquals1DimOfOperand<int x, int y> :
  PredOpTrait<"operand " # x # "'s rank equals operand " # y # "'s size",
    Or<[TFL_OperandIsUnrankedPred<x>,
        TFL_OperandIsUnrankedPred<y>,
        CPred<"!$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().hasStaticShape()">,
        CPred<"$_op.getOperand(" # x #
          ").getType().cast<ShapedType>().getRank() == "
          "$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().getShape()[0]">]>>;

// Trait: operand x is a 0-d tensor, or a 1-d tensor with exactly 1 element.
class TFL_Operand0DOr1ElementTensor<int x> :
  PredOpTrait<"operand #" # x # " is an 0-d tensor or 1-d tensor w/ 1 element",
    Or<[TFL_OperandHasKnownRank<x, 0>,
        And<[TFL_OperandHasKnownRank<x, 1>, TFL_OperandDimEquals<x, 0, 1>]>]>>;

// Return true if i-th dim of x-th operand is the same as j-th dim of y-th
// operand or any of those operands does not have static shape.
class TFL_OperandsHaveSameDims<int x, int y, int i, int j> :
    Or<[TFL_OperandIsUnrankedPred<x>,
        TFL_OperandIsUnrankedPred<y>,
        CPred<"!$_op.getOperand(" # x #
          ").getType().cast<ShapedType>().hasStaticShape()">,
        CPred<"!$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().hasStaticShape()">,
        CPred<"$_op.getOperand(" # x #
          ").getType().cast<ShapedType>().getShape()[" # i # "] == "
          "$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().getShape()[" # j # "]">]>;

// PredOpTrait wrapper around TFL_OperandsHaveSameDims with a diagnostic.
class TFL_OperandsHaveSameDimsTrait<int x, int y, int i, int j> :
  PredOpTrait<"dim " # i # " of operand " # x # " equals to dim " # j #
    " of operand " # y,
    TFL_OperandsHaveSameDims<x, y, i, j>>;
234
// Return true if number of elements of x-th operand is the same as j-th dim of
// y-th operand or any of those operands does not have static shape.
class TFL_NumElementsEqualsDim<int x, int y, int j> :
  Or<[TFL_OperandIsUnrankedPred<x>,
      TFL_OperandIsUnrankedPred<y>,
      CPred<"!$_op.getOperand(" # x #
        ").getType().cast<ShapedType>().hasStaticShape()">,
      CPred<"!$_op.getOperand(" # y #
        ").getType().cast<ShapedType>().hasStaticShape()">,
      CPred<"$_op.getOperand(" # x #
        ").getType().cast<ShapedType>().getNumElements() == "
        "$_op.getOperand(" # y #
        ").getType().cast<ShapedType>().getShape()[" # j # "]">]>;

// PredOpTrait wrapper around TFL_NumElementsEqualsDim with a diagnostic.
class TFL_NumElementsEqualsDimTrait<int x, int y, int j> :
  PredOpTrait<"operand " # x # " has num of elements equals to dim " # j #
    " of operand " # y,
    TFL_NumElementsEqualsDim<x, y, j>>;

// Return true if number of elements of x-th operand equals to n.
// Trivially satisfied when the operand is unranked or lacks a static shape.
class TFL_NumElements<int x, int n> :
  Or<[TFL_OperandIsUnrankedPred<x>,
      CPred<"!$_op.getOperand(" # x #
        ").getType().cast<ShapedType>().hasStaticShape()">,
      CPred<"$_op.getOperand(" # x #
        ").getType().cast<ShapedType>().getNumElements() == " # n>]>;
261
// PredOpTrait wrapper around TFL_NumElements with a diagnostic.
// (Fixed a doubled space in the diagnostic text: "equals to  " -> "equals to ".)
class TFL_NumElementsTrait<int x, int n> :
  PredOpTrait<"operand " # x # " has num of elements equals to " # n,
    TFL_NumElements<x, n>>;
265
// tf.uint8 and tf.quint8 are mapped to the same tflite types, so they are equal
// when used as element types.
// Checks that result i and operand j both have a "num"-bit element type that
// is either TF::Quint<num> or an unsigned integer of that width.
class TFL_TFTypesWithSameBits<int i, int j, int num> :
  And<[
    Or<[CPred<"getElementTypeOrSelf($_op.getResult(" # i # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getResult(" # i # ")).isUnsignedInteger(" # num # ")">]>,
    Or<[CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isUnsignedInteger(" # num # ")">]>]>;

// Same check as above, but between operands i and j (no result involved).
class TFL_TFOperandTypesWithSameBits<int i, int j, int num> :
  And<[
    Or<[CPred<"getElementTypeOrSelf($_op.getOperand(" # i # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getOperand(" # i # ")).isUnsignedInteger(" # num # ")">]>,
    Or<[CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isUnsignedInteger(" # num # ")">]>]>;

// Trait: operand n is NoneType, unranked, or has rank m.
class TFL_OperandIsNoneOrHasRank<int n, int m> :
  PredOpTrait<"operand " # n # " is " # m # "-D",
    Or<[
      TFL_OperandIsNoneType<n>,
      TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() == " # m>]>>;

// Trait: operand n is NoneType, unranked, or has rank at most m.
class TFL_OperandIsNoneOrHasRankAtMost<int n, int m> :
  PredOpTrait<"operand " # n # " is at most " # m # "-D",
    Or<[
      TFL_OperandIsNoneType<n>,
      TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() <= " # m>]>>;
297
// Predicate: operand n is unranked or has rank at most m.
class TFL_OperandHasRankAtMostPred<int n, int m> :
  Or<[TFL_OperandIsUnrankedPred<n>,
    CPred<"$_op.getOperand(" # n #
    ").getType().cast<ShapedType>().getRank() <= " # m>]>;

// Trait wrapper around TFL_OperandHasRankAtMostPred with a diagnostic.
class TFL_OperandHasRankAtMost<int n, int m> :
  PredOpTrait<"operand " # n # " is at most " # m # "-D",
    TFL_OperandHasRankAtMostPred<n, m>>;

// Trait: operand n is unranked or has rank at least m.
class TFL_OperandHasRankAtLeast<int n, int m> :
  PredOpTrait<"operand " # n # " is at least " # m # "-D",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() >= " # m>]>>;

// Trait: operand n is unranked or has rank in the inclusive range [x, y].
class TFL_OperandHasRankRange<int n, int x, int y> :
  PredOpTrait<"operand " # n # " has rank range [" # x # ", " # y # "]",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() "
      ">= " # x # " && $_op.getOperand(" # n # ").getType().cast<ShapedType>()."
      "getRank() <= " # y>]>>;
319
// Constraint: a FloatAttr whose value is non-negative (uses
// APFloat::isNegative(), so -0.0 is presumably rejected — confirm if relied
// upon).
def TFL_FloatNonNegative : AttrConstraint<
    CPred<"$_self.isa<FloatAttr>() && "
            "!$_self.cast<FloatAttr>().getValue().isNegative()">,
    "whose value is non-negative">;

// Constraint: a BoolAttr that is true.
def TFL_BoolTrue : AttrConstraint<
    CPred<"$_self.isa<BoolAttr>() && $_self.cast<BoolAttr>().getValue()">,
    "whose value is true">;

// Constraint: a BoolAttr that is false.
def TFL_BoolFalse : AttrConstraint<
    CPred<"$_self.isa<BoolAttr>() && !$_self.cast<BoolAttr>().getValue()">,
    "whose value is false">;

// Constraint: a string attribute equal to `value`.
// NOTE(review): no isa<StringAttr> guard before the cast, unlike the
// constraints above.
class TFL_StringEqualsTo<string value> : AttrConstraint<
    CPred<"$_self.cast<StringAttr>().getValue() == \"" # value # "\"">,
    "whose value equals to '" # value # "'">;

// Ensures the array attribute's size is within the given maximum size.
class TFL_ArrayMaxCount<int n> : AttrConstraint<
    CPred<"$_self.isa<ArrayAttr>() && $_self.cast<ArrayAttr>().size() <= " # n>,
    "whose size is at most " # n>;

// Ensures the given integer attribute has the given value.
class TFL_IntEqualsTo<int n> : AttrConstraint<
    CPred<"$_self.isa<IntegerAttr>() && "
            "$_self.cast<IntegerAttr>().getInt() == " # n>,
    "whose value is " # n>;

// Ensures the given LSTMKernelType attribute has the given value.
class TFL_LSTMKernelTypeEqualsTo<string value> : AttrConstraint<
    CPred<"$_self.isa<LSTMKernelTypeAttr>() && "
            "$_self.cast<LSTMKernelTypeAttr>().getValue() == " # value>,
    "whose value is " # value>;
353
// This is a quantization-aware version of TCresVTEtIsSameAsOp: result i and
// operand j element types match if they are the same type, are both 8-bit
// uint8/quint8 (interchangeable in TFLite), or are quantized types whose
// storage types are equal.
class TFL_TCresVTEtIsSameAsOp<int i, int j> : And<[
  TCOpResIsShapedTypePred<i, j>,
  Or<[
    TCresVTEtIsSameAsOpBase<i, j>,
    TFL_TFTypesWithSameBits<i, j, 8>,
    And<[
      SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # j # "))",
        quant_QuantizedType.predicate>,
      CPred<"quant::QuantizedType::castToStorageType("
                "getElementTypeOrSelf($_op.getResult(" # i # "))) == "
            "quant::QuantizedType::castToStorageType("
                "getElementTypeOrSelf($_op.getOperand(" # j # ")))">]>]>]>;

// Trait: operand 0 and result 0 share an element type (quantization-aware).
def TFL_SameFirstOperandAndFirstResultElementType :
  PredOpTrait<"values and output must have same element type",
              TFL_TCresVTEtIsSameAsOp<0, 0>>;

// This is a quantization-aware version of TCopVTEtAreSameAt, comparing two
// operands rather than an operand and a result.
class TFL_TCopVTEtAreSameAt<int i, int j, int num=8> : Or<[
  TCopVTEtAreSameAt<[i, j]>,
  TFL_TFOperandTypesWithSameBits<i, j, num>,
  And<[
    SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # j # "))",
      quant_QuantizedType.predicate>,
    CPred<"quant::QuantizedType::castToStorageType("
              "getElementTypeOrSelf($_op.getOperand(" # i # "))) == "
          "quant::QuantizedType::castToStorageType("
              "getElementTypeOrSelf($_op.getOperand(" # j # ")))">]>]>;
383
//===----------------------------------------------------------------------===//
// TFL op common constraints.
//===----------------------------------------------------------------------===//

// Operands 0 and 1 have the same element type, treating two quantized types
// as equal when their underlying storage types match. `op` only customizes
// the diagnostic text.
class OperandsSameElementTypeConstraintBase<string op> :
  PredOpTrait<op # " operands have same element type",
    Or<[
      TCopVTEtIsSameAs<0, 1>,
      // Two operands' values are both quantized and their type have the same
      // underlying storage type.
      And<[
        SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(0))",
          quant_QuantizedType.predicate>,
        CPred<"quant::QuantizedType::castToStorageType("
                  "getElementTypeOrSelf($_op.getOperand(0))) == "
              "quant::QuantizedType::castToStorageType("
                  "getElementTypeOrSelf($_op.getOperand(1)))">]>]>>;

// This is a constraint for most of the binary ops, e.g., add, mul, div, etc.
// Binary ops lhs & rhs should have the same value type, and is capable to
// compare quantization types as well.
def BinaryOpSameElementTypeConstraint :
  OperandsSameElementTypeConstraintBase<"binary op">;

// This is a constraint for most of the comparison ops, e.g., equal, not_equal,
// greater, greater_equal, less, etc. Comparison ops lhs & rhs should have the
// same value type, and is capable to compare quantization types as well.
def ComparisonOpSameElementTypeConstraint :
  OperandsSameElementTypeConstraintBase<"comparison op">;
413
//===----------------------------------------------------------------------===//
// TFL common builders.
//===----------------------------------------------------------------------===//

// Builder for binary ops: derives the result type by broadcasting the lhs and
// rhs types. Emits an error at the op's location when the operands are not
// broadcastable (note: it still appends the null result type afterwards).
def TFL_BroadcastableBinaryBuilder :
  OpBuilder<(ins "Value":$lhs, "Value":$rhs),
  [{
    auto resultType =
      OpTrait::util::getBroadcastedType(lhs.getType(), rhs.getType());
    if (!resultType)
      mlir::emitError($_state.location, "non-broadcastable operands");
    $_state.addOperands({lhs, rhs});
    $_state.types.push_back(resultType);
  }]>;

// Builder for broadcastable binary ops that carry a fused activation
// function attribute; delegates to the C++ helper.
def TFL_FusedBroadcastableBinaryBuilder :
  OpBuilder<(ins "Value":$lhs, "Value":$rhs,
    "StringAttr":$fusedActivationFunction),
  [{
    buildFusedBroadcastableBinOp(
       &$_builder, $_state, lhs, rhs, fusedActivationFunction);
  }]>;

// Builder for comparison binary ops; delegates to buildComparisonBinOp.
def TFL_ComparisonBinaryBuilder :
  OpBuilder<(ins "Value":$lhs, "Value":$rhs),
  [{
    buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
  }]>;
442
//===----------------------------------------------------------------------===//
// TFL op base class.
//===----------------------------------------------------------------------===//

// LINT.IfChange

// Base class for all TFLite dialect ops. Every op derived from this class
// additionally implements the TFL_RuntimeVerification interface.
class TFL_Op<string mnemonic, list<Trait> traits = []> :
    Op<TFL_Dialect, mnemonic, !listconcat(traits,
      [DeclareOpInterfaceMethods<TFL_RuntimeVerification>])> {
  // FlatBuffer generation specific information.
  // -------------------------------------------
  // When generating the FlatBuffer output some operations have
  // Options (as defined in the schema). These options are effectively
  // the attributes of the operations (e.g., what padding is to be used
  // for a pooling operator). Not all operations have Options and some
  // operations share Options. The following attributes indicate whether
  // the operation has Options in the serialized FlatBuffer.

  // Whether the TFLite operator has options in the schema representation.
  bit hasOptions = 0b0;

  // Use to specify a custom options type for TFLite operators where
  // the option's name does not match the TFLite operator's name.
  // If no customOption is specified then <name>Options is used if the op
  // hasOptions.
  string customOption = ?;
}
470
// Shared base for the convolution ops (e.g. conv_2d, depthwise_conv_2d).
// `index` selects the operand whose quantization dimension the affine
// coefficient trait applies to.
class TFL_ConvOp<string mnemonic, string opSummary, int index,
                 list<Trait> additional_traits = []> :
    TFL_Op<mnemonic,[NoSideEffect,
                     AccumulatorUniformScale<2, 0, 1>,
                     AffineQuantizedOpInterface,
                     AffineOpCoefficient<index, 1>,
                     QuantizableResult,
                     TFL_SparseOp] # additional_traits> {
  let summary = opSummary # " operator";

  let description = [{
    Performs convolution operation on inputs.

    Inputs:
      `inputs[0]`: required: the input activation tensor
      `inputs[1]`: required: the filter weight tensor
      `inputs[2]`: optional: the bias tensor
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8]>:$filter,
    TFL_1DTensorOfOrNone<[F32, I32, I64]>:$bias,
    I32Attr:$dilation_h_factor,
    I32Attr:$dilation_w_factor,
    TFL_AFAttr:$fused_activation_function,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_h,
    I32Attr:$stride_w
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$output);

  let hasOptions = 0b1;
}
506
507
//===----------------------------------------------------------------------===//
// TFL op definitions.
//===----------------------------------------------------------------------===//

// Elementwise absolute value; shape-preserving and constant-foldable.
def TFL_AbsOp : TFL_Op<"abs", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Absolute value operator";

  let description = [{
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
  }];

  let arguments = (ins TFL_TensorOf<[I16, F32, QI8, QI16]>:$x);

  let results = (outs TFL_TensorOf<[I16, F32, QI8, QI16]>:$y);

  let hasFolder = 1;
}
529
// Elementwise addition with broadcasting and an optional fused activation.
// Shape validity is checked at runtime via VerifyAddOpShapeConstraints.
def TFL_AddOp : TFL_Op<"add", [
    TFL_RuntimePredOpTrait<"Operands do not have valid shapes",
      CPred<"TFL::VerifyAddOpShapeConstraints(llvm::cast<AddOp>($_op))">>,
    ResultsBroadcastableShape,
    NoSideEffect,
    Commutative,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Addition operator";

  let description = [{
    Element-wise addition operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$lhs,
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$rhs,
    TFL_AFAttr:$fused_activation_function);

  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$output);

  let hasFolder = 1;

  let builders = [TFL_FusedBroadcastableBinaryBuilder];

  // Custom assembly uses the compact one-result, same-operand-type form.
  let hasCustomAssemblyFormat = 1;

  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let hasOptions = 1;
}
568
// N-ary elementwise addition over a variadic list of input tensors.
def TFL_AddNOp : TFL_Op<"add_n", [
    Commutative,
    NoSideEffect,
    SameOperandsAndResultsScale,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "add_n operator";

  let description = [{
    Adds all input tensors element-wise.
  }];

  let arguments = (ins
    TFL_VariadicTensorOf<[F32, I32]>:$inputs
  );

  let results = (outs
    TFL_TensorOf<[F32, I32]>:$sum
  );
}
588
// Logical-or reduction over the dimensions given by reduction_indices.
// Shares the FlatBuffer ReducerOptions with the other reduce ops.
def TFL_ReduceAnyOp : TFL_Op<"reduce_any", [NoSideEffect]> {
  let summary = [{
Computes the "logical or" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
  }];

  let arguments = (ins
    TFL_BoolTensor:$input,
    TFL_I32Tensor:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    TFL_BoolTensor:$output
  );

  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
615
// Logical-and reduction over the dimensions given by reduction_indices.
// Shares the FlatBuffer ReducerOptions with the other reduce ops.
def TFL_ReduceAllOp : TFL_Op<"reduce_all", [NoSideEffect]> {
  let summary = [{
Computes the "logical and" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
  }];

  let arguments = (ins
    TFL_BoolTensor:$input,
    TFL_I32Tensor:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    TFL_BoolTensor:$output
  );

  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
642
// Transpose (a.k.a. "deconvolution") convolution. Operand order follows the
// TFLite kernel: output_shape (1-D), weights (4-D), input (4-D), optional
// bias. Weights (operand 1) are the sparse/quantizable operand.
def TFL_TransposeConvOp: TFL_Op<"transpose_conv", [
    NoSideEffect,
    TFL_OperandHasRank<0, 1>,
    TFL_OperandHasRank<1, 4>,
    TFL_OperandHasRank<2, 4>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 2>>,
    AccumulatorUniformScale<3, 1, 2>,
    AffineQuantizedOpInterface, AffineOpCoefficient<0, 1>,
    QuantizableResult,
    TFL_SparseOp,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>,
    DynamicRangeQuantizedOpInterface]> {
  let summary = "Transpose convolution operator";

  let description = [{
    Performs transpose convolution operation on input.
  }];

  let arguments = (ins
    TFL_I32Tensor:$output_shape,
    TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$weights,
    TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    TFL_TensorOfOrNone<[F32, QI32, I64]>:$bias,
    TFL_PaddingAttr:$padding,
    ConfinedAttr<I32Attr, [IntPositive]>:$stride_h,
    ConfinedAttr<I32Attr, [IntPositive]>:$stride_w
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$output);

  let hasOptions = 1;

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return 0; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }
    // DynamicRangeQuantizedOpInterface:
    std::vector<int> GetQuantizableOperandIndices() { return {1}; }
  }];
}
690
// 2-D average pooling. Shares the FlatBuffer Pool2DOptions with max_pool_2d.
def TFL_AveragePool2DOp:
    TFL_Op<"average_pool_2d",
           [NoSideEffect,
            SameOperandsAndResultsScale,
            QuantizableResult,
            DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Average_pool_2d operator";

  let description = [{
    Performs average-pooling operation on input.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    I32Attr:$filter_height,
    I32Attr:$filter_width,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_h,
    I32Attr:$stride_w,
    TFL_AFAttr:$fused_activation_function
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$output);

  let hasOptions = 1;
  let customOption = "Pool2DOptions";
}
718
// Index of the maximum value along dimension `dim`.
def TFL_ArgMaxOp : TFL_Op<"arg_max", [
    QuantizableResult,
    NoSideEffect]> {
  let summary = "ArgMax operator";

  let description = [{
    Returns the index with the largest value across dimensions of a tensor.
  }];

  let arguments = (
    ins TFL_TensorOf<[I1, F32, I32, I8, UI8, QI8, QUI8]>:$input,
    TFL_I32OrI64Tensor:$dim
  );

  let results = (outs
    TFL_I32OrI64Tensor:$output
  );

  let hasOptions = 1;

  // Derived output type for serialization: INT64 when the result element
  // width exceeds 32 bits, INT32 otherwise.
  DerivedTFLiteTypeAttr output_type = DerivedTFLiteTypeAttr<[{
    return getResult().getType().cast<TensorType>().getElementType().
        cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
            tflite::TensorType_INT32;
    }], [{
      TypeAttr::get(getResult().getType().cast<TensorType>().getElementType())
    }]>;
}
747
// Index of the minimum value along dimension `dim`; mirrors TFL_ArgMaxOp.
def TFL_ArgMinOp : TFL_Op<"arg_min", [
    QuantizableResult,
    NoSideEffect]> {
  let summary = "ArgMin operator";

  let description = [{
    Returns the index with the smallest value across dimensions of a tensor.
      a = [1, 10, 26.9, 2.8, 166.32, 62.3]
      b = tf.math.argmin(input = a)
      c = tf.keras.backend.eval(b)
  }];

  let arguments = (
    ins TFL_TensorOf<[I1, F32, I32, I8, UI8, QI8, QUI8]>:$input,
    TFL_I32OrI64Tensor:$dim
  );

  let results = (outs
    TFL_I32OrI64Tensor:$output
  );

  let hasOptions = 1;

  // Derived output type for serialization: INT64 when the result element
  // width exceeds 32 bits, INT32 otherwise.
  DerivedTFLiteTypeAttr output_type = DerivedTFLiteTypeAttr<[{
    return getResult().getType().cast<TensorType>().getElementType().
        cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
            tflite::TensorType_INT32;
    }], [{
      TypeAttr::get(getResult().getType().cast<TensorType>().getElementType())
    }]>;
}
779
// Element-wise ceiling on floating-point tensors; traits require input and
// output to share shape and element type.
def TFL_CeilOp: TFL_Op<"ceil", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Ceil operator";

  let description = [{
    Returns element-wise ceil value of the input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);
}
794
def TFL_ConcatenationOp : TFL_Op<"concatenation",
  [
    NoSideEffect,
    TFL_SameFirstOperandAndFirstResultElementType,
    SameOperandsAndResultsScale,
    QuantizableResult
  ]> {
  let summary = "Concatenation operator";

  let description = [{
    Concatenates tensors along one dimension
  }];

  // Variadic inputs concatenated along `axis`; an activation function can be
  // fused into the op.
  let arguments = (
    ins TFL_VariadicTensorOf<
      [F32, I64, I32, I16, I8, QI8, QUI8, UI8, I1]>:$values,
    I32Attr:$axis,
    TFL_AFAttr:$fused_activation_function
  );

  let results = (outs
    TFL_TensorOf<
      [F32, I64, I32, I16, I8, QI8, QUI8, UI8, I1]>:$output
  );

  let hasOptions = 1;

  let hasFolder = 1;

  let hasVerifier = 1;

  // Relaxes the SameOperandsAndResultsScale trait for unsigned 8-bit types.
  let extraClassDeclaration = [{
    // SameScalesOpInterface:
    bool RequiredSameOperandsAndResultsScale(bool sign, int bit_width) {
      // uint8 doesn't require same operands and results scales.
      bool is_uint8 = !sign && (bit_width == 8);
      return !is_uint8;
    }
  }];
}
835
def TFL_ConstOp : Op<TFL_Dialect, "pseudo_const", [ConstantLike, NoSideEffect,
    FirstAttrDerivedResultType,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "Constant pseudo op.";

  let description = [{
    Represents a constant value in TensorFlow Lite dialect. This is not an
    actual operation and it will be lowered to buffer instead.

    The op is allowed to have all the same type of attributes as tf.Const does
    (e.g., opaque TF attributes are allowed).
  }];

  let arguments = (ins ElementsAttr:$value);

  let results = (outs AnyTensor:$output);

  let hasFolder = 1;
  let hasCanonicalizer = 1;

  // Convenience builder that derives the result type from the value attribute.
  let builders = [
    OpBuilder<(ins "TypedAttr":$value),
    [{
      $_state.addAttribute("value", value);
      $_state.addTypes(value.getType());
    }]>
  ];

  // Custom return-type compatibility check; implementation lives in the op's
  // C++ source file.
  let extraClassDeclaration = [{
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
  }];
}
869
def TFL_SparseConstOp : Op<TFL_Dialect, "pseudo_sparse_const", [
    NoSideEffect,
    FirstAttrDerivedResultType,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "Sparse constant pseudo op.";

  let description = [{
    Represents a sparse constant value in TensorFlow Lite dialect. This is not
    an actual operation and it will be lowered to buffer instead.
  }];

  // `value` is the dense view, `s_param` carries the sparsity parameters, and
  // `compressed_data` holds the sparse-encoded payload.
  let arguments = (ins ElementsAttr:$value,
                   SparsityParameterAttr:$s_param,
                   ElementsAttr:$compressed_data);

  let results = (outs AnyTensor:$output);

  // Builder derives the result type from the dense value attribute.
  let builders = [
    OpBuilder<(ins "TypedAttr":$value, "SparsityParameterAttr":$s_param,
      "Attribute":$compressed_data),
    [{
      $_state.addTypes(value.getType());
      $_state.addAttribute("value", value);
      $_state.addAttribute("s_param", s_param);
      $_state.addAttribute("compressed_data", compressed_data);
    }]>
  ];
}
899
def TFL_ExternalConstOp : Op<TFL_Dialect, "external_const", [
    NoSideEffect,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "External const op.";

  let description = [{
    External const op holds a `buffer_index` which points to a constant
    in the flatbuffer.
  }];

  // The constant's data is not embedded here; only the flatbuffer buffer
  // index is stored.
  let arguments = (ins I32Attr:$buffer_index);

  let results = (outs AnyTensor:$output);
}
915
// 2-D convolution. Arguments/results are inherited from the TFL_ConvOp base
// class; this def supplies quantization/sparsity hooks for the op.
def TFL_Conv2DOp : TFL_ConvOp<"conv_2d", "Convolution", 0,
      [DeclareOpInterfaceMethods<InferTypeOpInterface>,
       DeclareOpInterfaceMethods<TFL_ArithmeticCount>,
       DynamicRangeQuantizedOpInterface]> {
  let hasCanonicalizer = 1;

  // Channel/quantization dim 0 here; contrast with depthwise_conv_2d, which
  // uses dim 3. Operand 1 (the filter) is both sparseable and quantizable.
  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return 0; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }

    // Returns whether the return types are compatible.
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);

    // DynamicRangeQuantizedOpInterface:
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() { return {1}; }
  }];
}
939
// Element-wise cosine on floating-point tensors; constant-foldable.
def TFL_CosOp: TFL_Op<"cos", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Cosine operator";

  let description = [{
    Computes element-wise Cosine of input
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasFolder = 1;
}
956
def TFL_CumsumOp: TFL_Op<"cumsum", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRank<1, 0>]> {
  let summary = "Cumsum operator";

  let description = [{
    Compute the cumulative sum of the tensor x along axis.
  }];

  // `axis` must be a scalar i32 (rank-0, enforced by TFL_OperandHasRank<1, 0>
  // above). `exclusive`/`reverse` default to false as in TF's tf.cumsum.
  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64]>:$input,
    TFL_I32Tensor:$axis,
    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
    DefaultValuedAttr<BoolAttr, "false">:$reverse
  );

  let results = (outs TFL_TensorOf<[F32, I32, I64]>:$output);

  let hasOptions = 1;
}
979
def TFL_DepthwiseConv2DOp :
    TFL_ConvOp<"depthwise_conv_2d", "Depthwise-separable convolution", 3,
                [DeclareOpInterfaceMethods<TFL_ArithmeticCount>,
                 DynamicRangeQuantizedOpInterface]> {
  // Overrides the base-class argument list to add `depth_multiplier`.
  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8]>:$filter,
    TFL_1DTensorOfOrNone<[F32, I32, I64]>:$bias,
    I32Attr:$dilation_h_factor,
    I32Attr:$dilation_w_factor,
    TFL_AFAttr:$fused_activation_function,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_h,
    I32Attr:$stride_w,
    I32Attr:$depth_multiplier
  );

  let hasCanonicalizer = 1;

  // Channel/quantization dim is 3 (cf. conv_2d, which uses 0). Operand 1
  // (the filter) is both sparseable and quantizable.
  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 3; }
    int GetQuantizationDimIndex() { return 3; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }
    // DynamicRangeQuantizedOpInterface:
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() { return {1}; }
  }];
}
1012
1013// TODO(jpienaar): Update post discussion on semantics of FC OP.
def TFL_FullyConnectedOp : TFL_Op<"fully_connected", [
    NoSideEffect, AccumulatorUniformScale<2, 0, 1>,
    AffineQuantizedOpInterface,
    AffineOpCoefficient<-1, 1>,
    TFL_SparseOp,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>,
    QuantizableResult,
    DynamicRangeQuantizedOpInterface]> {
  let summary = "Fully connected op";

  let arguments = (ins
    TFL_TensorOf<[F32, QI8, QUI8, QI16, QUI16]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$filter,
    TFL_TensorOfOrNone<[F32, QI32, QUI32]>:$bias,

    TFL_AFAttr:$fused_activation_function,
    TFL_FullyConnectedOptionsWeightFormatAttr:$weights_format,
    BoolAttr:$keep_num_dims,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs
  );

  // Depending on the weights format, this op can have one or two outputs.
  let results = (outs
    TFL_VariadicTensorOf<[F32, QI8, QUI8, QI16, QUI16]>:$output
  );

  let hasVerifier = 1;

  let hasOptions = 1;

  let hasCanonicalizer = 1;

  let hasFolder = 1;

  // Per-tensor quantization (quantization dim -1). The filter (operand 1) is
  // sparseable with 1x4 float / 1x16 quantized block sizes, and is the
  // dynamic-range-quantizable operand.
  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return -1; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {{1, 4}}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {{1, 16}}; }
    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() { return {1}; }
  }];
}
1064
def TFL_BatchMatMulOp : TFL_Op<"batch_matmul", [
   NoSideEffect,
   TFL_OperandHasAtleastRank<0, 2>,
   TFL_OperandHasAtleastRank<1, 2>,
   QuantizableResult,
   PredOpTrait<"x and output must have same element type or they are int8 and int32",
       Or<[TFL_TCresVTEtIsSameAsOp<0, 0>,
           And<[CPred<"getElementTypeOrSelf($_op.getOperand(0)).isInteger(8)">,
                CPred<"getElementTypeOrSelf($_op.getOperand(1)).isInteger(8)">,
                CPred<"getElementTypeOrSelf($_op.getResult(0)).isInteger(32)">]>]>>,
   TFL_RuntimePredOpTrait<"lhs and rhs of this op must have rank between [2, 5]",
     And<[TFL_OperandHasRankAtMostPred<0, 5>,
          TFL_OperandHasRankAtMostPred<1, 5>]>>,
   DynamicRangeQuantizedOpInterface]> {

  let summary = "Batch Matrix Multiply Operator";

  // Fixed copy-paste error in the description below: the second adjoint line
  // duplicated the LHS entry; it now documents the RHS, using the actual
  // attribute names `adj_x`/`adj_y` declared in `arguments`.
  let description = [{
Performs a batched matrix multiplication on the inputs. Follows the
conventions of TensorFlow BatchMatMulV2, with support for unknown dimensions
in the batch dimensions and broadcasting.

    Inputs:
      `inputs[0]`: required: input LHS
      `inputs[1]`: required: input RHS
      `adj_x`: optional: Transpose LHS (default false)
      `adj_y`: optional: Transpose RHS (default false)
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, QI8, QI16, I8]>:$x,
    TFL_TensorOf<[F32, QI8, QI16, I8]>:$y,
    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs
  );

  // Result may be i32 when both inputs are i8 (see the PredOpTrait above).
   let results = (outs
    TFL_TensorOf<[F32, QI8, QI16, I32]>:$output
  );

  let hasOptions = 1;

  // Operand 1 (the RHS weights) is the dynamic-range-quantizable operand.
  let extraClassDeclaration = [{
    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() { return {1}; }
  }];
}
1117
def TFL_GatherOp : TFL_Op<"gather", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    TFL_OperandHasAtleastRank<0, 1>,
    PredOpTrait<"params and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    DynamicRangeQuantizedOpInterface
  ]> {
  let summary = "Gather operator";

  let description = [{
    Gather slices from `params` axis `axis` according to `indices`.
  }];

  // `batch_dims` defaults to 0, matching TF's tf.gather.
  let arguments = (ins
    TFL_TensorOf<[F32, I1, I8, I32, I64, TFL_Str, UI8, QI8, QUI8, QI16]>:$params,
    TFL_TensorOf<[I32, I64]>:$indices,
    I32Attr:$axis,
    DefaultValuedAttr<I32Attr, "0">:$batch_dims
  );

  // Delegates result-type construction to the BuildGatherOp helper.
  let builders =
  [
    OpBuilder<(ins "Value":$params, "Value":$indices, "IntegerAttr":$axis, "IntegerAttr":$batch_dims),
    [{ BuildGatherOp(&$_builder, $_state, params, indices, axis, batch_dims); }]>
  ];

  let results = (outs
    TFL_TensorOf<[F32, I1, I8, I32, I64, TFL_Str, UI8, QI8, QUI8, QI16]>:$output
  );

  let hasOptions = 1;

  let hasVerifier = 1;

  // Unusually, operand 0 (`params`) is the dynamic-range-quantizable operand
  // here, not operand 1 as in most other ops in this file.
  let extraClassDeclaration = [{
    // DynamicRangeQuantizedOpInterface:
    std::vector<int> GetQuantizableOperandIndices() { return {0}; }
  }];
}
1159
def TFL_GatherNdOp : TFL_Op<"gather_nd", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    PredOpTrait<"params and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Gather_nd operator";

  let description = [{
    Gather slices from `params` into a Tensor with shape specified by `indices`.
  }];

  // `indices` may be i32 or i64; `params` element type propagates to the
  // output (enforced by the PredOpTrait above).
  let arguments = (ins
    TFL_TensorOf<[F32, I8, I16, I64, I32, UI8, TFL_Str]>:$params,
    TFL_I32OrI64Tensor:$indices
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I64, I32, UI8, TFL_Str]>:$output
  );
}
1181
def TFL_ScatterNdOp : TFL_Op<"scatter_nd", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    TFL_OperandHasAtleastRank<0, 1>,
    TFL_OperandHasAtleastRank<1, 1>,
    PredOpTrait<"updates and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 1>>
  ]> {
  let summary = "Scatter_nd operator";

  let description = [{
    Scatter `updates` into a new tensor according to `indices`
  }];

  // `shape` is a 1-D i32 tensor giving the output shape; the output's element
  // type follows `updates` (operand 1), per the PredOpTrait above.
  let arguments = (ins
    TFL_TensorOf<[I32]>:$indices,
    TFL_TensorOf<[F32, I8, I64, I32, UI8, I1]>:$updates,
    TFL_1DTensorOf<[I32]>:$shape
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I64, I32, UI8, I1]>:$output
  );

  let hasVerifier = 1;

  let hasOptions = 1;
}
1211
1212// Same type check of lhs and rhs is handled by the ResultsBroadcastableShape trait.
def TFL_LessEqualOp : TFL_Op<"less_equal", [
    ResultsBroadcastableShape,
    QuantizableResult,
    ComparisonOpSameElementTypeConstraint,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    NoSideEffect]> {
  let summary = "Less_equal operator";

  let description = [{
    Element-wise less_equal operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8]>:$lhs,
      TFL_TensorOf<[F32, I32, I64, QI8, QUI8]>:$rhs);

  // Comparison always yields a boolean tensor.
  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Shared compact assembly form used by the comparison/binary ops in this
  // file: operands share one printed type, one result.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let hasOptions = 0;
}
1246
def TFL_LocalResponseNormalizationOp : TFL_Op<"local_response_normalization", [
    TFL_OperandHasRank<0, 4>,
    SameOperandsAndResultShape,
    SameOperandsAndResultType,
    NoSideEffect]> {
  let summary = "Local Response Normalization.";

  let description = [{
The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
dimension), and each vector is normalized independently.  Within a given vector,
each component is divided by the weighted, squared sum of inputs within
`depth_radius`.  In detail,

    sqr_sum[a, b, c, d] =
        sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
    output = input / (bias + alpha * sqr_sum) ** beta

For details, see [Krizhevsky et al., ImageNet classification with deep
convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
  }];

  // `radius` here corresponds to `depth_radius` in the formula above; `bias`,
  // `alpha`, and `beta` are the scalar normalization parameters.
  let arguments = (ins
      TFL_FpTensor:$input,
      I32Attr:$radius,
      F32Attr:$bias,
      F32Attr:$alpha,
      F32Attr:$beta
  );

  let results = (outs
    TFL_FpTensor:$output
  );

  let hasOptions = 1;
}
1282
def TFL_GreaterEqualOp : TFL_Op<"greater_equal", [
    QuantizableResult,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    ResultsBroadcastableShape,
    ComparisonOpSameElementTypeConstraint,
    NoSideEffect]> {
  let summary = "Greater_equal operator";

  let description = [{
    Element-wise greater_equal operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[F32, I32, I64, QUI8, QI8]>:$lhs,
      TFL_TensorOf<[F32, I32, I64, QUI8, QI8]>:$rhs);

  // Comparison always yields a boolean tensor.
  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Same compact assembly form as the other comparison ops in this file.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let hasOptions = 0;
}
1316
def TFL_MatrixDiagOp : TFL_Op<"matrix_diag", [
  QuantizableResult,
  NoSideEffect,
  TFL_OperandHasAtleastRank<0, 1>,
  PredOpTrait<"operand and result must have the same element type",
    TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = [{
    Returns a tensor with the provided diagonal and everything else padded with zeros.
  }];

  let description = [{
    Given a diagonal, returns a tensor with the diagonal and everything else padded with zeros.
    Assume diagonal has k dimensions `[I, J, K, ..., N]`, then the output is a tensor of rank `k+1`
    with dimensions `[I, J, K, ..., N, N]` where:
       `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n].`
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QUI8, QI8, TFL_Quint8]>:$diagonal
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QUI8, QI8, TFL_Quint8]>:$output
  );

  // No flatbuffer options table for this op.
  let hasOptions = 0;
}
1344
def TFL_MatrixSetDiagOp : TFL_Op<"matrix_set_diag", [
    QuantizableResult,
    TFL_OperandHasAtleastRank<0, 2>,
    PredOpTrait<"input and result must have the same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect]> {
  let summary = [{
    Returns a batched matrix tensor with new batched diagonal values.
  }];

  let description = [{
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the main diagonal of the
innermost matrices.  These will be overwritten by the values in `diagonal`.
  }];

  // `input` must be at least rank 2 (batched matrices); `diagonal` supplies
  // the replacement main-diagonal values.
  let arguments = (ins
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QI16, QUI8, TFL_Quint8]>:$input,
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QI16, QUI8, TFL_Quint8]>:$diagonal
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QI16, QUI8, TFL_Quint8]>:$result
  );

  let hasOptions = 0;
}
1372
1373// These ops are named NonMaxSuppressionV4 & NonMaxSuppressionV5 to be
1374// consistent with TensorFlow's naming. They are NOT 'versions' of NMS in the
1375// sense that one is an incremental change over the other.
1376// In reality NonMaxSuppressionV5 implements Soft Non Max Suppression and
1377// NonMaxSuppressionV4 performs hard NMS.
1378
def TFL_NonMaxSuppressionV4Op : TFL_Op<"non_max_suppression_v4", [
  NoSideEffect,
  // Operand 0 (boxes) should have rank 2 with the dim[1] == 4 (box corners)
  TFL_OperandHasRank<0, 2>,
  PredOpTrait<"boxes should have dim[1] == 4",
      TFL_OperandDimEquals<0, 1, 4>>,
  // Operand 1 (scores) should be a 1-dim tensor
  TFL_OperandHasRank<1, 1>,
  // Other operands are scalar params.
  TFL_OperandHasRank<2, 0>, TFL_OperandHasRank<3, 0>,
  TFL_OperandHasRank<4, 0>]> {
  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score,
  }];

  let description = [{
pruning away boxes that have high intersection-over-union (IOU) overlap
with previously selected boxes.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and more
generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translating or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather operation`.  For example:
  selected_indices = tf.image.non_max_suppression_v2(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
  }];

  // Scalars: `max_output_size` (i32), `iou_threshold` and `score_threshold`
  // (float), per the rank-0 constraints in the trait list above.
  let arguments = (ins
    TFL_FpTensor:$boxes,
    TFL_FpTensor:$scores,
    TFL_I32Tensor:$max_output_size,
    TFL_FpTensor:$iou_threshold,
    TFL_FpTensor:$score_threshold
  );

  // NOTE(review): `valid_outputs` presumably reports how many entries of
  // `selected_indices` are valid — confirm against the kernel implementation.
  let results = (outs
    TFL_I32Tensor:$selected_indices,
    TFL_I32Tensor:$valid_outputs
  );
}
1427
def TFL_NonMaxSuppressionV5Op : TFL_Op<"non_max_suppression_v5", [
  NoSideEffect,
  // Operand 0 (boxes) should have rank 2 with the dim[1] == 4 (box corners)
  TFL_OperandHasRank<0, 2>,
  PredOpTrait<"boxes should have dim[1] == 4",
      TFL_OperandDimEquals<0, 1, 4>>,
  // Operand 1 (scores) should be a 1-dim tensor
  TFL_OperandHasRank<1, 1>,
  // Other operands are scalar params.
  TFL_OperandHasRank<2, 0>, TFL_OperandHasRank<3, 0>,
  TFL_OperandHasRank<4, 0>, TFL_OperandHasRank<5, 0>]> {
  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score,
  }];

  let description = [{
pruning away boxes that have high intersection-over-union (IOU) overlap
with previously selected boxes.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and more
generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translating or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather operation`.  For example:
  selected_indices = tf.image.non_max_suppression_v2(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
of other overlapping boxes instead of directly causing them to be pruned.
To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
larger than 0.
  }];

  // Same operands as non_max_suppression_v4 plus the scalar `soft_nms_sigma`
  // that enables Soft-NMS when larger than 0 (see description).
  let arguments = (ins
    TFL_FpTensor:$boxes,
    TFL_FpTensor:$scores,
    TFL_I32Tensor:$max_output_size,
    TFL_FpTensor:$iou_threshold,
    TFL_FpTensor:$score_threshold,
    TFL_FpTensor:$soft_nms_sigma
  );

  // Adds `selected_scores` relative to the V4 op's results.
  let results = (outs
    TFL_I32Tensor:$selected_indices,
    TFL_FpTensor:$selected_scores,
    TFL_I32Tensor:$valid_outputs
  );
}
1483
def TFL_NotEqualOp : TFL_Op<"not_equal", [
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    ComparisonOpSameElementTypeConstraint,
    ResultsBroadcastableShape,
    Commutative,
    NoSideEffect]> {
  let summary = "Not_equal operator";

  let description = [{
    Element-wise not_equal operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[I1, F32, I32, I64, QUI8, QI8, TFL_Quint8, TFL_Str]>:$lhs,
      TFL_TensorOf<[I1, F32, I32, I64, QUI8, QI8, TFL_Quint8, TFL_Str]>:$rhs);

  // Comparison always yields a boolean tensor.
  let results = (outs TFL_BoolTensor:$output);

  // Inline builder instead of the shared TFL_ComparisonBinaryBuilder used by
  // the other comparison ops; it delegates to the same helper.
  let builders =
  [
    OpBuilder<(ins "Value":$lhs, "Value":$rhs),
    [{
        buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
      }]>
  ];

  let hasCustomAssemblyFormat = 1;

  // Same compact assembly form as the other comparison ops in this file.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
1521
def TFL_DivOp : TFL_Op<"div", [
    // TODO(fengliuai): NoQuantizableResult is only correct for int8
    // quantization. update to handle Uint8 quantization.
    BinaryOpSameElementTypeConstraint,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 5>,
    ResultsBroadcastableShape,
    NoSideEffect,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Division operator";

  let description = [{
    Element-wise division operation.
  }];

  // An activation function can be fused into the op.
  let arguments = (
      ins TFL_TensorOf<[F32, I32, QUI8]>:$lhs,
      TFL_TensorOf<[F32, I32, QUI8]>:$rhs,
      TFL_AFAttr:$fused_activation_function);

  let results = (outs TFL_TensorOf<[F32, I32, QUI8]>:$output);

  let builders = [TFL_FusedBroadcastableBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Same compact assembly form as the other binary ops in this file.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let hasOptions = 1;

  let hasFolder = 1;
}
1560
def TFL_EluOp: TFL_Op<"elu", [
    NoSideEffect,
    SameOperandsAndResultShape,
    TFL_SameFirstOperandAndFirstResultElementType]> {
  let summary = "Exponential Linear Unit operator";
  let description = [{
    Computes the exponential linear
      f(x) -> exp(x) - 1 for x < 0, x for x >= 0.
    element-wise.
  }];

  let arguments = (ins TFL_TensorOf<[F32, I8]>:$x);

  let results = (outs TFL_TensorOf<[F32, I8]>:$y);

  // No flatbuffer options table for this op.
  let hasOptions = 0;
}
1578
def TFL_EmbeddingLookupOp: TFL_Op<"embedding_lookup",
    [NoSideEffect,
     PredOpTrait<"value and output must have same element type",
       TFL_TCresVTEtIsSameAsOp<0, 1>>,
     TFL_OperandHasRank<0, 1>,
     TFL_OperandHasRankAtLeast<1, 2>,
     DynamicRangeQuantizedOpInterface,
     QuantizableResult
    ]> {
  let summary = "Embedding lookup operator";

  let description = [{
    Looks up ids in a list of embedding tensors.
  }];

  // `lookup` is a 1-D i32 id tensor; `value` is the embedding table with rank
  // >= 2 (both enforced by the rank traits above).
  let arguments = (ins
    TFL_TensorOf<[I32]>:$lookup,
    TFL_TensorOf<[F32, I8, UI8]>:$value
   );

  let results = (outs TFL_TensorOf<[F32, I8, UI8]>:$output);

  // The embedding table (operand 1) is the dynamic-range-quantizable operand.
  let extraClassDeclaration = [{
    // DynamicRangeQuantizedOpInterface:
    std::vector<int> GetQuantizableOperandIndices() { return {1}; }
  }];
}
1606
def TFL_EqualOp: TFL_Op<"equal", [
    Commutative,
    NoSideEffect,
    ResultsBroadcastableShape,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    QuantizableResult,
    ComparisonOpSameElementTypeConstraint]> {
  let summary = "Equal operator";

  let description = [{
    Returns the truth element of x == y element-wise
  }];

  let arguments = (
    ins
    TFL_TensorOf<[I1, F32, I32, I64, QI8, QUI8, UI8, TFL_Str]>:$x,
    TFL_TensorOf<[I1, F32, I32, I64, QI8, QUI8, UI8, TFL_Str]>:$y
  );

  // Comparison always yields a boolean tensor.
  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];
}
1630
// Element-wise natural exponentiation on floating-point tensors.
def TFL_ExpOp: TFL_Op<"exp", [
    NoSideEffect,
    SameOperandsAndResultType]> {
  let summary = "Natural exponentiation operator";

  let description = [{
    Performs element-wise natural exponentiation operation on input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  // Was `0b1`; use the decimal literal for consistency with every other op in
  // this file. The value is identical.
  let hasOptions = 1;
}
1646
def TFL_ExpandDimsOp: TFL_Op<"expand_dims", [
    NoSideEffect,
    SameOperandsAndResultsScale,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Inserts a dimension of 1 into a tensor's shape.";

  let description = [{
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
zero; if you specify a negative number for `axis` it is counted backward from
the end.

This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.

Other examples:

```
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```

This operation requires that:

`-1-input.dims() <= dim <= input.dims()`

This operation is related to `squeeze()`, which removes dimensions of
size 1.
  }];

  // TODO: Restriction on dim's size and valid range are not modeled here.
  // Any element type is accepted; only `dim` is constrained (to i32/i64).
  let arguments = (ins AnyTensor:$input, TFL_I32OrI64Tensor:$dim);

  let results = (outs AnyTensor:$output);

  let hasOptions = 1;
}
1695
// tfl.squeeze: removes size-1 dimensions. `squeeze_dims` (at most 8 entries,
// default empty) restricts removal to the listed dimensions.
def TFL_SqueezeOp: TFL_Op<"squeeze", [NoSideEffect,
                                      QuantizableResult,
                                      SameOperandsAndResultsScale]> {
  let summary = "Removes dimensions of size 1 from the shape of a tensor.";

  let description = [{
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`squeeze_dims`.

For example:

```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```

Or, to remove specific size 1 dimensions:

```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
  }];

  let arguments = (ins
    AnyTensor:$input,
    ConfinedAttr<DefaultValuedAttr<I64ArrayAttr, "{}">, [TFL_ArrayMaxCount<8>]>:$squeeze_dims
  );

  let results = (outs
    AnyTensor:$output
  );

  // Constant-foldable (hasFolder); options serialized under "SqueezeOptions".
  let hasFolder = 1;
  let hasOptions = 1;

  let customOption = "SqueezeOptions";
}
1736
// tfl.fill: produces a tensor of shape `dims` filled with `input`'s value.
// The result element type must match `input` (operand index 1).
def TFL_FillOp: TFL_Op<"fill", [
    NoSideEffect,
    SameOperandsAndResultsScale,
    QuantizableResult,
    PredOpTrait<"input and result must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
  let summary = "Fill the tensor with given value.";
  let description = [{
    Fill the tensor with given value.
  }];

  let arguments = (ins TFL_I32OrI64Tensor:$dims,
                   TFL_TensorOf<[F32, I32, I64, I1, QI8, QI16, TFL_Str]>:$input);

  let results = (outs TFL_TensorOf<[F32, I32, I64, I1, QI8, QI16, TFL_Str]>:$result);

  let hasOptions = 0;
}
1755
// tfl.floor: element-wise floor on floating-point tensors; operand and result
// share the same tensor type.
def TFL_FloorOp: TFL_Op<"floor", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Floor operator";

  let description = [{
    Returns element-wise floor value of the input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);
}
1770
// tfl.floor_div: element-wise floored division on F32/I32 tensors with
// broadcasting (runtime check allows operands up to rank 4).
def TFL_FloorDivOp : TFL_Op<"floor_div", [
    ResultsBroadcastableShape,
    NoSideEffect,
    BinaryOpSameElementTypeConstraint,
    PredOpTrait<"lhs and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>]> {
  let summary = "Floor div operator";

  let description = [{
    Element-wise floor div operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32]>:$lhs, TFL_TensorOf<[F32, I32]>:$rhs);

  let results = (outs TFL_TensorOf<[F32, I32]>:$output);

  let builders = [TFL_BroadcastableBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Assembly uses the shared one-result/same-operand-type parse/print helpers.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
1802
// tfl.floor_mod: element-wise floored-division remainder on I32/I64/F32
// tensors with broadcasting (runtime check allows operands up to rank 4).
def TFL_FloorModOp : TFL_Op<"floor_mod", [
    ResultsBroadcastableShape,
    NoSideEffect,
    BinaryOpSameElementTypeConstraint,
    PredOpTrait<"lhs and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>]> {
  // Fixed typo: "reminder" -> "remainder" in the summary and description.
  let summary = "Division remainder";

  let description = [{
    Element-wise division remainder operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[I32, I64, F32]>:$lhs,
    TFL_TensorOf<[I32, I64, F32]>:$rhs);

  let results = (outs TFL_TensorOf<[I32, I64, F32]>:$output);

  let builders = [TFL_BroadcastableBinaryBuilder];
}
1824
// tfl.greater: element-wise comparison producing a boolean tensor; operands
// broadcast (runtime check allows up to rank 4).
def TFL_GreaterOp : TFL_Op<"greater", [
    ResultsBroadcastableShape,
    ComparisonOpSameElementTypeConstraint,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Greater operator";

  let description = [{
    Element-wise greater operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$lhs,
    TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Assembly uses the shared one-result/same-operand-type parse/print helpers.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
1856
// tfl.hard_swish: element-wise f(x) = x * relu6(x + 3) / 6 on F32/QUI8/QI8
// tensors; output has the input's shape and element type.
def TFL_HardSwishOp: TFL_Op<"hard_swish", [
    NoSideEffect,
    SameOperandsAndResultShape,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Hardswish activation function.";
  let description = [{
    Computes hard-swish activation function
      f(x) -> (x * relu6(x+3))/6
    element-wise.
  }];

  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$input);

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$output);

  let hasOptions = 0;
}
1876
// tfl.l2_normalization: L2-normalize with an optional fused activation. The
// quantized output range is fixed here to scale 1/128, zero point 0.
def TFL_L2NormalizationOp : TFL_Op<"l2_normalization", [NoSideEffect,
    FixedOutputRangeInterface,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "L2 Normalize Operator";

  let description = [{
    L2Normalization Op
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, QUI8, QI8, QUI16, QI16, I8]>:$input,
    TFL_AFAttr:$fused_activation_function
  );

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, QUI16, QI16, I8]>:$output);

  let hasOptions = 1;

  let customOption = "L2NormOptions";

  let extraClassDeclaration = [{
  // FixedOutputRangeInterface:
  quant::UniformQuantizedType GetFixedOutputRange(
      bool is_signed, int bit_width) {
    auto result_type = output().getType();
    // central_value = min_value / 2 + (max_value - 1) / 2 + 1
    // zero_point = central_value
    // scale = 1. / (central_value - min_value)
    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
        /*scale=*/1.0 / 128, /*zero_point=*/0);
  }
  }];
}
1911
// tfl.leaky_relu: x if x >= 0 else alpha * x, where `alpha` is a compile-time
// float attribute (not a tensor operand).
def TFL_LeakyReluOp: TFL_Op<"leaky_relu", [
    SameOperandsAndResultShape,
    QuantizableResult,
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Leaky Relu operator";

  let description = [{
    Element-wise Leaky ReLU operator
      x -> x >= 0 ? x : (alpha * x)
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8, QI16]>:$input,
    // Slope of the activation function at x < 0.
    F32Attr:$alpha
  );

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8, QI16]>:$output);

  // 0b1 is the same as `let hasOptions = 1;` used by other ops in this file.
  let hasOptions = 0b1;
}
1935
// tfl.less: element-wise comparison producing a boolean tensor; operands
// broadcast (runtime check allows up to rank 4).
def TFL_LessOp : TFL_Op<"less", [
    ResultsBroadcastableShape,
    ComparisonOpSameElementTypeConstraint,
    QuantizableResult,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    NoSideEffect]> {
  let summary = "Less operator";

  let description = [{
    Element-wise less operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$lhs,
    TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Assembly uses the shared one-result/same-operand-type parse/print helpers.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
1967
// tfl.logical_and: element-wise AND on boolean tensors.
def TFL_LogicalAndOp : TFL_Op<"logical_and", [NoSideEffect]> {
  let summary = "Logical AND operator";

  let description = [{
    Element-wise logical AND operation.
  }];

  let arguments = (
    ins TFL_BoolTensor:$lhs,
    TFL_BoolTensor:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let hasCustomAssemblyFormat = 1;

  // Assembly uses the shared one-result/same-operand-type parse/print helpers.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
1992
// tfl.logical_not: element-wise NOT on a boolean tensor.
// NOTE(review): this is a unary op but the operand is named `lhs`; renaming it
// would change the generated accessor, so it is documented rather than fixed.
def TFL_LogicalNotOp : TFL_Op<"logical_not", [
    NoSideEffect,
    SameOperandsAndResultShape]> {
  let summary = "Logical NOT operator";

  let description = [{
    Element-wise logical NOT operation.
  }];

  let arguments = (ins TFL_BoolTensor:$lhs);

  let results = (outs TFL_BoolTensor:$output);
}
2006
// tfl.logical_or: element-wise OR on boolean tensors.
def TFL_LogicalOrOp : TFL_Op<"logical_or", [NoSideEffect]> {
  let summary = "Logical OR operator";

  let description = [{
    Element-wise logical OR operation.
  }];

  let arguments = (
    ins TFL_BoolTensor:$lhs,
    TFL_BoolTensor:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let hasCustomAssemblyFormat = 1;

  // Assembly uses the shared one-result/same-operand-type parse/print helpers.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
2031
// tfl.logistic: element-wise sigmoid. The quantized output range is fixed
// here to scale 1/256 with zero point -128 (signed representation).
def TFL_LogisticOp: TFL_Op<"logistic", [
    NoSideEffect,
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    SameOperandsAndResultShape,
    FixedOutputRangeInterface,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Logistic operator";

  let description = [{
    Computes element-wise Sigmoid of input
  }];

  let arguments = (ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$x);

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$y);

  let extraClassDeclaration = [{
  // FixedOutputRangeInterface:
  quant::UniformQuantizedType GetFixedOutputRange(
      bool is_signed, int bit_width) {
    auto result_type = y().getType();
    // zero_point = 0
    // scale = 1. / (max_value + 1)
    // (The -128 below is the signed-storage zero point; it corresponds to 0
    // in the unsigned convention described above.)
    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
        /*scale=*/1.0 / 256, /*zero_point=*/-128);
  }
  }];

  // This builder doesn't work with quantized type, so it can only be used by
  // non-quantization tablegen patterns. Currently, it is used by the
  // elementwise-move reordering pattern in the optimize_patterns.td
  let builders = [
    OpBuilder<(ins "Value":$input),
    [{
      $_state.addOperands({input});
      $_state.addTypes(input.getType());
    }]>
  ];
}
2073
// tfl.log: element-wise natural logarithm on float tensors; constant-foldable.
def TFL_LogOp: TFL_Op<"log", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Natural logarithm operator";

  let description = [{
    Performs element-wise natural logarithm operation on input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasFolder = 1;
}
2090
// tfl.log_softmax: element-wise log-softmax. The quantized output range is
// fixed here to scale 16/256 with zero point 127.
def TFL_LogSoftmaxOp : TFL_Op<"log_softmax", [
    NoSideEffect,
    SameOperandsAndResultShape,
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    FixedOutputRangeInterface,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Log softmax operator";

  let description = [{
    Computes element-wise log softmax activations with the following formula

      input - log(reduce_sum(exp(input), dim))
  }];

  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8]>:$input);

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8]>:$output);

  let hasOptions = 1;

  let extraClassDeclaration = [{
  // FixedOutputRangeInterface:
  quant::UniformQuantizedType GetFixedOutputRange(
      bool is_signed, int bit_width) {
    auto result_type = output().getType();
    // zero_point = max_value
    // scale = -log_softmax_output_min / (max_value + 1)
    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
        /*scale=*/16.0 / 256, /*zero_point=*/127);
  }
  }];
}
2125
// TODO(ashwinm): Revisit the granularity of the PredOpTraits. We could
// break this into smaller PredOpTraits, each with more descriptive messages
// that would make it easier to trace failures OR, need a way to specify desc
// per Predicate inside the trait and get tablegen to use that to emit error
// message.
// Reusable trait: operand 0's element type is F32/QI8/QUI8 and the result's
// element type matches it. Used by TFL_MaxPool2DOp below.
def MaxPoolOperandAndResultConstraints : PredOpTrait<"MaxPool2D operand and "
    "result types match specified constraints",
  And<[
    // The input and output tensors should have the same elemental type
    // and they should be one of the specified types below.
    TCopVTEtIs<0, AnyTypeOf<[F32, QI8, QUI8]>>,
    TFL_TCresVTEtIsSameAsOp<0, 0>]>>;
2138
// tfl.max_pool_2d: 2-D max pooling over a rank-4 input. Window size, strides,
// padding, and fused activation come from attributes; input/output element
// types and quantization scales must match.
def TFL_MaxPool2DOp : TFL_Op<"max_pool_2d", [
    TFL_OperandHasRank<0, 4>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    MaxPoolOperandAndResultConstraints,
    SameOperandsAndResultsScale,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Max Pool 2D op";

  let description = [{
    Performs max pool 2D on input.

    Inputs:
      `inputs[0]`: required: the input tensor
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QUI8, QI8, QI16, TFL_Quint8]>:$input,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_w,
    I32Attr:$stride_h,
    I32Attr:$filter_width,
    I32Attr:$filter_height,
    TFL_AFAttr:$fused_activation_function
  );

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, QI16, TFL_Quint8]>:$output);

  let hasOptions = 1;

  let customOption = "Pool2DOptions";
}
2173
// tfl.maximum: commutative element-wise max with broadcasting (runtime check
// allows operands up to rank 5); quantization scales pass through.
def TFL_MaximumOp : TFL_Op<"maximum", [
    ResultsBroadcastableShape,
    NoSideEffect,
    QuantizableResult,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 5>,
    Commutative,
    SameOperandsAndResultsScale]> {
  let summary = "Max operator";
  let description = [{
    Element-wise max operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$lhs,
    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$rhs
  );

  let results = (outs
    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$max
  );

  let builders = [TFL_BroadcastableBinaryBuilder];

  let hasOptions = 0;
}
2199
// tfl.mean: mean reduction along the dimensions listed in `axis`; `keep_dims`
// retains reduced dimensions with length 1.
def TFL_MeanOp : TFL_Op<"mean", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Mean operator";

  let description = [{
    Computes the mean of elements across dimensions of a tensor.
    Reduces input_tensor along the dimensions given in axis.
    Unless keepdims is true, the rank of the tensor is reduced by 1 for
    each entry in axis. If keepdims is true, the reduced dimensions are retained
    with length 1.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, UI8, QI16]>:$input,
    TFL_TensorOf<[I32, I64]>:$axis,
    BoolAttr:$keep_dims
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, UI8, QI16]>:$output);

  // Shares the "ReducerOptions" flatbuffer options with the other reductions.
  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
2227
// tfl.one_hot: builds a one-hot tensor from integer `indices`; `depth` is a
// scalar i32 tensor and `on_value`/`off_value` supply the two fill values.
def TFL_OneHotOp : TFL_Op<"one_hot", [
    QuantizableResult,
    NoSideEffect]> {
  let summary = "OneHot operator";

  let description = [{
    Returns a one-hot tensor.The locations represented by indices in `indices`
    take value `on_value`, while all other locations take value `off_value`.

    If the input `indices` is rank `N`, the output will have rank `N+1`,
    The new axis is created at dimension `axis` (default: the new axis is
    appended at the end).
  }];

  let arguments = (ins
    TFL_TensorOf<[I32, I64]>:$indices,
    TFL_I32Tensor:$depth,
    TFL_TensorOf<[F32, I32, I64, I1, I8, UI8]>:$on_value,
    TFL_TensorOf<[F32, I32, I64, I1, I8, UI8]>:$off_value,

    I32Attr:$axis
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, I1, I8, UI8]>:$output
  );

  let hasOptions = 1;
}
2257
// tfl.round: element-wise round-to-nearest-integer on float tensors; operand
// and result share the same tensor type.
def TFL_RoundOp: TFL_Op<"round", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Round operator";

  let description = [{
Rounds the values of a tensor to the nearest integer, element-wise.
  }];

  let arguments = (ins
    TFL_FpTensor:$x
  );

  let results = (outs
    TFL_FpTensor:$y
  );
}
2276
// tfl.slice: extracts a sub-tensor of `size` starting at `begin`. Input is at
// most rank 5; `begin` and `size` are 1-D index tensors. Has a verifier and
// canonicalization patterns defined in C++.
def TFL_SliceOp : TFL_Op<"slice", [
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    SameOperandsAndResultsScale,
    TFL_OperandHasRankAtMost<0, 5>,
    TFL_OperandHasRankAtMost<1, 1>,
    TFL_OperandHasRankAtMost<2, 1>]> {
  let summary = "Return a slice from 'input'.";

  let description = [{
The output tensor is a tensor with dimensions described by 'size'
whose values are extracted from 'input' starting at the offsets in
'begin'.

`begin` is zero-based; `size` is one-based. If size[i] is -1, all remaining
elements in dimension i are included in the slice. In other words, this is
equivalent to setting:
  size[i] = input.dim_size(i) - begin[i]

*Requirements*:
  0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, I8, UI8, I1, TFL_Str, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    TFL_I32OrI64Tensor:$begin,
    TFL_I32OrI64Tensor:$size
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, I8, UI8, I1, TFL_Str, QI8, QUI8, TFL_Quint8, QI16]>:$output
  );

  let hasVerifier = 1;

  let hasCanonicalizer = 1;
}
2316
// tfl.sum: sum reduction along `axes`. Unlike the other reductions here, the
// same-scale requirement is relaxed for 8-bit types (see the override below).
def TFL_SumOp: TFL_Op<"sum", [
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    SameOperandsAndResultsScale]> {

  let summary = "Sum operator";

  let description = [{
    Computes the sum reduction along the specified axes
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    TFL_I32Tensor:$axes,
    BoolAttr:$keep_dims
  );

  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);

  let hasOptions = 1;
  let customOption = "ReducerOptions";

  let extraClassDeclaration = [{
    // SameScalesOpInterface:
    bool RequiredSameOperandsAndResultsScale(bool sign, int bit_width) {
      // Eight-bit types don't require same operands and results scales.
      return bit_width != 8;
    }
  }];
}
2349
// tfl.reduce_min: min reduction along `axes`; mirrors tfl.reduce_max below.
def TFL_ReduceMinOp: TFL_Op<"reduce_min", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale]> {
  let summary = "Min-reduction operator";

  let description = [{
    Computes the min reduction along the specified axes
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    TFL_I32Tensor:$axes,
    BoolAttr:$keep_dims
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);

  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
2374
// tfl.reduce_max: max reduction along `axes`; mirrors tfl.reduce_min above.
def TFL_ReduceMaxOp: TFL_Op<"reduce_max", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale]> {
  let summary = "Max-reduction operator";

  let description = [{
    Computes the max reduction along the specified axes
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    TFL_I32Tensor:$axes,
    BoolAttr:$keep_dims
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);

  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
2399
// tfl.reduce_prod: product reduction along `axes`. Note it does NOT carry
// SameOperandsAndResultsScale, unlike the min/max reductions above.
def TFL_ReduceProdOp: TFL_Op<"reduce_prod", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Prod-reduction operator";

  let description = [{
    Computes the product along the specified axes
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    TFL_I32Tensor:$axes,
    BoolAttr:$keep_dims
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);

  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
2423
// tfl.minimum: commutative element-wise min with broadcasting (runtime check
// allows operands up to rank 5); quantization scales pass through.
def TFL_MinimumOp : TFL_Op<"minimum", [
    ResultsBroadcastableShape,
    NoSideEffect,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 5>,
    Commutative,
    QuantizableResult,
    SameOperandsAndResultsScale]> {
  let summary = "Min operator";
  let description = [{
    Element-wise min operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$lhs,
    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$rhs
  );

  let results = (outs
    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$min
  );

  let builders = [TFL_BroadcastableBinaryBuilder];

  let hasOptions = 0;
}
2449
// tfl.mul: commutative element-wise multiply with an optional fused
// activation. Shape compatibility is checked at runtime by
// TFL::VerifyMulOpShapeConstraints; the op is constant-foldable.
def TFL_MulOp : TFL_Op<"mul", [
    ResultsBroadcastableShape,
    NoSideEffect,
    Commutative,
    QuantizableResult,
    BinaryOpSameElementTypeConstraint,
    TFL_RuntimePredOpTrait<"Operands do not have valid shapes",
      CPred<"TFL::VerifyMulOpShapeConstraints(llvm::cast<MulOp>($_op))">>,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Multiplication operator";

  let description = [{
    Element-wise multiplication operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16, Complex<F<32>>]>:$lhs,
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16, Complex<F<32>>]>:$rhs,
    TFL_AFAttr:$fused_activation_function);

  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16, Complex<F<32>>]>:$output);

  let hasFolder = 1;

  let builders = [TFL_FusedBroadcastableBinaryBuilder];

  let hasCustomAssemblyFormat = 1;

  // Assembly uses the shared one-result/same-operand-type parse/print helpers.
  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let hasOptions = 1;
}
2489
// tfl.neg: element-wise negation on F32/I32/I64 tensors; constant-foldable.
def TFL_NegOp: TFL_Op<"neg", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Negation operator";

  let description = [{
    Computes element-wise negation of input
  }];

  let arguments = (ins TFL_TensorOf<[F32, I32, I64]>:$x);

  let results = (outs TFL_TensorOf<[F32, I32, I64]>:$y);

  // 0b1 is the same as `let hasOptions = 1;` used by other ops in this file.
  let hasOptions = 0b1;

  let hasFolder = 1;
}
2508
// tfl.pack: stacks `values_count` rank-R tensors along a new `axis` dimension
// into one rank-(R+1) tensor. Has a verifier and canonicalization patterns
// defined in C++; `values_count` must be positive.
def TFL_PackOp : TFL_Op<"pack", [
    TFL_SameFirstOperandAndFirstResultElementType,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale]> {
  let summary = "Packs a list of tensors along a dimension into one tensor";

  let description = [{
    Packs a list of `values_count` rank-`R` tensors into one rank-`(R+1)`
    tensor.

    Packs the `values_count` tensors in `values` into a tensor with rank one
    higher than each tensor in `values`, by packing them along the `axis`
    dimension.

    Given a list of tensors of shape `(A, B, C)`;

    if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
    if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
    Etc.

    For example:

    ```
    # 'x' is [1, 4]
    # 'y' is [2, 5]
    # 'z' is [3, 6]
    pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
    pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
    ```

    This is the opposite of `unpack`.
  }];

  let arguments = (ins
    TFL_VariadicTensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QUI8, QI16, TFL_Quint8]>:$values,

    ConfinedAttr<I32Attr, [IntPositive]>:$values_count,
    I32Attr:$axis
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QUI8, QI16, TFL_Quint8]>:$output
  );

  let hasVerifier = 1;

  let hasCanonicalizer = 1;

  let hasOptions = 1;
}
2560
// tfl.pad: zero-pads `input` per a rank-2 `padding` tensor of shape [rank, 2].
// Input is at most rank 5 and padding's first dimension must match the
// input's rank (and be at most 5). See tfl.padv2 for a caller-chosen pad
// value. Constant-foldable.
def TFL_PadOp : TFL_Op<"pad", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    SameOperandsAndResultsScale,
    TFL_OperandHasRankAtMost<0, 5>,
    TFL_OperandHasRank<1, 2>,
    TFL_OperandRankEquals1DimOfOperand<0, 1>,
    QuantizableResult,
    PredOpTrait<"the first dim size of the padding argument must be at most 5",
      Or<[TFL_OperandIsUnrankedPred<1>,
          TFL_OperandDimIsAtMost<1, 0, 5>]>>]> {
  let summary = "Padding operator";

  let description = [{
    This operation pads a `input` with zeros according to the `paddings` you
    specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
    the rank of `input`. For each dimension D of `input`, `paddings[D, 0]`
    indicates how many zeros to add before the contents of `input` in that
    dimension, and `paddings[D, 1]` indicates how many zeros to add after the
    contents of `input` in that dimension.

    The padded size of each dimension D of the output is:

      `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

    For example:

    ```
    # 't' is [[1, 1], [2, 2]]
    # 'paddings' is [[1, 1], [2, 2]]
    # rank of 't' is 2
    pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                          [0, 0, 1, 1, 0, 0]
                          [0, 0, 2, 2, 0, 0]
                          [0, 0, 0, 0, 0, 0]]
    ```
  }];

  let arguments = (ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    TFL_I32OrI64Tensor:$padding);

  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);

  let hasOptions = 1;

  let hasFolder = 1;
}
2609
// tfl.padv2: like tfl.pad, but pads with a caller-supplied scalar
// `constant_values` (operand 2, rank 0) whose element type must match the
// input. Constant-foldable.
def TFL_PadV2Op : TFL_Op<"padv2", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    TFL_OperandHasRankAtMost<0, 5>,
    TFL_OperandHasRank<1, 2>,
    TFL_OperandHasRank<2, 0>,
    TFL_OperandRankEquals1DimOfOperand<0, 1>,
    PredOpTrait<"the first dim size of the padding argument must be at most 5",
      Or<[TFL_OperandIsUnrankedPred<1>,
          TFL_OperandDimIsAtMost<1, 0, 5>]>>,
    PredOpTrait<"input and constant value operands must have same element type",
      TFL_TCopVTEtAreSameAt<0, 2>>]> {
  let summary = "Padding operator v2";

  let description = [{
    This operation pads a `input` according to the `paddings` and
    `constant_values` you specify. `paddings` is an integer tensor with shape
    `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`,
    `paddings[D, 0]` indicates how many zeros to add before the contents of
    `input` in that dimension, and `paddings[D, 1]` indicates how many zeros to
    add after the contents of `input` in that dimension. `constant_values` is a
    scalar tensor of the same type as `input` that indicates the value to use
    for padding `input`.

    The padded size of each dimension D of the output is:

      `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`

    For example:

    ```
    # 't' is [[1, 1], [2, 2]]
    # 'paddings' is [[1, 1], [2, 2]]
    # rank of 't' is 2
    pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
                          [0, 0, 1, 1, 0, 0]
                          [0, 0, 2, 2, 0, 0]
                          [0, 0, 0, 0, 0, 0]]
    ```
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64, UI8, QI8, QUI8, TFL_Quint8]>:$input,
    TFL_I32OrI64Tensor:$padding,
    TFL_TensorOf<[F32, I32, I64, UI8, QI8, QUI8, TFL_Quint8]>:$constant_values);

  let results = (outs TFL_TensorOf<[F32, I32, I64, UI8, QI8, QUI8, TFL_Quint8]>:$output);

  let hasOptions = 1;

  let hasFolder = 1;
}
2665
// tfl.poly_call: holds several single-block regions that are alternative
// implementations of the same computation. Declared with the raw Op class
// (not the TFL_Op helper) so it can attach RegionBranchOpInterface and an
// implicit YieldOp terminator.
def TFL_PolyCallOp : Op<TFL_Dialect, "poly_call", [
    DeclareOpInterfaceMethods<RegionBranchOpInterface>,
    SingleBlockImplicitTerminator<"YieldOp">]> {
  let summary = [{Poly call}];

  let description = [{
    Have multiple function bodies for the same computation. This allows a
    program compiler/interpreter to choose one of the available options to
    execute the program based on which one is most suitable for the target
    backend.

    input:  A list of input tensors whose types are T.
    output: A list of output tensors whose types are T.

    call:  Multiple regions, each of which encapsulates the same semantic
           computation but in different forms.
  }];

  let arguments = (ins Variadic<AnyTensor>:$input);

  let results = (outs Variadic<AnyTensor>:$output);

  let regions = (region VariadicRegion<SizedRegion<1>>:$calls);

  let hasCanonicalizer = 1;
}
2692
2693
// Element-wise power with numpy-style broadcasting (up to rank 4, per the
// operand-shape trait below).
def TFL_PowOp : TFL_Op<"pow", [
    ResultsBroadcastableShape,
    NoSideEffect,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>]> {
  let summary = "Power operator";

  let description = [{
    Element-wise power operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32]>:$lhs,
    TFL_TensorOf<[F32, I32]>:$rhs);

  let results = (outs TFL_TensorOf<[F32, I32]>:$output);

  // Custom printer/parser: prints the compact one-result,
  // same-operand-type form instead of the generic assembly.
  let hasCustomAssemblyFormat = 1;

  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let builders = [TFL_BroadcastableBinaryBuilder];
}
2723
// Parameterized ReLU: negative inputs are scaled by a learned `alpha` tensor.
def TFL_PReluOp : TFL_Op<"prelu", [
    NoSideEffect,
    QuantizableResult,
    ResultsBroadcastableShape,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    BinaryOpSameElementTypeConstraint,
    PredOpTrait<"input and output must have the same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    AffineQuantizedOpInterface, AffineOpCoefficient<-1, 1>]> {
  let summary = "Parameterized Relu operator";

  let description = [{
    Parameterized Relu operator
      x -> x >= 0 ? x : (alpha * x)
    where alpha is a trainable tensor.
    input and alpha should be the same size as input or be broadcastable.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$alpha
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$output);

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    // The quantization dim index of -1 means per-tensor (not per-channel)
    // quantization for the affine coefficient.
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return -1; }
  }];
}
2757
// Returns the rank (number of dimensions) of the input as a scalar int.
def TFL_RankOp: TFL_Op<"rank", [
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Rank operator.";
  let description = [{
    Returns the rank of a tensor.
  }];

  let arguments = (ins AnyTensor:$input);

  let results = (outs TFL_IntTensor:$output);

  // Foldable when the input rank is statically known.
  let hasFolder = 1;
}
2772
// Element-wise max(0, x).
def TFL_ReluOp: TFL_Op<"relu", [
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Relu operator";

  let description = [{
    Element-wise Relu operator
      x -> max(0, x)
  }];

  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8, QI16]>:$x);

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, QI16]>:$y);

  // This builder doesn't work with quantized type, so it can only be used by
  // non-quantization tablegen patterns. Currently, it is used by the
  // elementwise-move reordering pattern in the optimize_patterns.td
  let builders = [
    OpBuilder<(ins "Value":$input),
    [{
      $_state.addOperands({input});
      $_state.addTypes(input.getType());
    }]>
  ];
}
2801
// Element-wise max(0, min(6, x)).
def TFL_Relu6Op: TFL_Op<"relu6", [
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Relu6 operator";

  let description = [{
    Element-wise Relu6 operator
      x -> max(0, min(6, x))
  }];

  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);

  // This builder doesn't work with quantized type, so it can only be used by
  // non-quantization tablegen patterns. Currently, it is used by the
  // elementwise-move reordering pattern in the optimize_patterns.td
  let builders = [
    OpBuilder<(ins "Value":$input),
    [{
      $_state.addOperands({input});
      $_state.addTypes(input.getType());
    }]>
  ];
}
2830
// Element-wise clamp to [-1, 1]; the TFLite op name is "relu_n1_to_1".
def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Relu1 operator";

  let description = [{
    Element-wise Relu1 operator
      x -> max(-1, min(1, x))
  }];

  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);

  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);

  // This builder doesn't work with quantized type, so it can only be used by
  // non-quantization tablegen patterns. Currently, it is used by the
  // elementwise-move reordering pattern in the optimize_patterns.td
  let builders = [
    OpBuilder<(ins "Value":$input),
    [{
      $_state.addOperands({input});
      $_state.addTypes(input.getType());
    }]>
  ];
}
2859
// Reshape: same element values, new static shape taken from the result type.
def TFL_ReshapeOp: TFL_Op<"reshape", [
    QuantizableResult,
    NoSideEffect,
    SameOperandsAndResultsScale,
    DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
  let summary = "Reshape operator";

  let description = [{
    Produces a tensor with the same values but different static shape defined
    by the output type.
  }];

  let arguments = (
    ins AnyTensor:$input,
    TFL_I32Tensor:$shape);

  let results = (outs AnyTensor:$output);
  // Use the decimal bit literal for consistency with every other
  // `hasFolder`/`hasVerifier`/`hasOptions` flag in this file (was `0b1`).
  let hasCanonicalizer = 1;
  let hasFolder = 1;
  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // Relaxed return-type compatibility used by the InferTypeOpInterface
    // implementation declared above.
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
  }];
}
2885
// Reverses a variable-length prefix of each batch slice along `seq_dim`.
def TFL_ReverseSequenceOp : TFL_Op<"reverse_sequence", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    TFL_OperandHasRank<1, 1>]> {
  let summary = "Reverses variable length slices.";

  let description = [{
This op first slices `input` along the dimension `batch_dim`, and for each
slice `i`, reverses the first `seq_lengths[i]` elements along
the dimension `seq_dim`.

The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.

The output slice `i` along dimension `batch_dim` is then given by input
slice `i`, with the first `seq_lengths[i]` slices along dimension
`seq_dim` reversed.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI16, QUI8, TFL_Quint8]>:$input,
    TFL_I32OrI64Tensor:$seq_lengths,

    // Both dimension attributes must be non-negative axis indices.
    ConfinedAttr<I32Attr, [IntNonNegative]>:$seq_dim,
    ConfinedAttr<I32Attr, [IntNonNegative]>:$batch_dim
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI16, QUI8, TFL_Quint8]>:$output
  );

  let hasOptions = 1;
}
2921
// Element-wise 1/sqrt(x).
def TFL_RsqrtOp: TFL_Op<"rsqrt", [NoSideEffect,
                                  QuantizableResult,
                                  TFL_SameFirstOperandAndFirstResultElementType,
                                  SameOperandsAndResultShape]> {
  let summary = "Reciprocal of square root operator";

  let description = [{
    Computes element-wise reverse square root of input
  }];

  let arguments = (ins TFL_TensorOf<[F32, QI8, QI16]>:$x);

  let results = (outs TFL_TensorOf<[F32, QI8, QI16]>:$y);

  let hasFolder = 1;
}
2938
// Returns the shape of the input as a 1-D int32/int64 tensor.
def TFL_ShapeOp: TFL_Op<"shape", [
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Shape operator";

  let description = [{
    Returns the shape of a tensor.
  }];

  let arguments = (ins AnyTensor:$input);

  let results = (outs TFL_TensorOf<[I32, I64]>:$output);

  // `out_type` is derived from the result element type rather than stored
  // as an explicit attribute.
  DerivedTypeAttr out_type = DerivedTypeAttr<[{
    return getResult().getType().cast<TensorType>().getElementType();
  }]>;

  let hasOptions = 1;

  // Foldable when the input shape is statically known.
  let hasFolder = 1;
}
2960
// 1-D sequence [start, limit) with step `delta`; all three operands are
// scalars (rank-0) of the same element type as the result.
def TFL_RangeOp: TFL_Op<"range", [
    NoSideEffect,
    TFL_OperandHasRank<0, 0>,
    TFL_OperandHasRank<1, 0>,
    TFL_OperandHasRank<2, 0>,
    PredOpTrait<"operands and output must have same element type",
      And<[TCresVTEtIsSameAsOp<0, 0>, TCresVTEtIsSameAsOp<0, 1>,
           TCresVTEtIsSameAsOp<0, 2>]>>]> {
  let summary = "Range operator";

  let description = [{
    Returns a 1D tensor defined by a sequence from `start` to `limit` with
    a given `delta`.
  }];

  let arguments = (ins
    TFL_TensorOf<[I32, F32]>:$start,
    TFL_TensorOf<[I32, F32]>:$limit,
    TFL_TensorOf<[I32, F32]>:$delta);

  let results = (outs TFL_TensorOf<[I32, F32]>:$result);

  let hasFolder = 1;
}
2985
// Reverses the input along the dimensions listed in `axis`.
def TFL_ReverseV2Op: TFL_Op<"reverse_v2", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect,
    QuantizableResult,
    TFL_OperandHasRank<1, 1>]> {
  let summary = "ReverseV2 Operator";

  let description = [{
    Reverses specific dimensions of a tensor.

    Given a tensor, and a int32/int64 tensor axis representing the set
    of dimensions of tensor to reverse.
    This operation reverses each dimension i for
    which there exists j s.t. axis[j] == i.

    Args:
      tensor: A Tensor. Must be one of the following types:
      uint8, int8, int16, int32, int64, float32, bool Up to 8-D.

      axis: A Tensor. Must be one of the following types: int32, int64.
      with only 1 element which is the axis index.
      TODO: Add support for multiple elements.
  }];

  let arguments = (
    ins
    TFL_TensorOf<[F32, UI8, I16, I32, I64, QI16, QUI8, QI8, TFL_Quint8, I1]>:$input,
    TFL_I32Tensor:$axis
  );

  let results = (outs
    TFL_TensorOf<[F32, UI8, I16, I32, I64, QI16, QUI8, QI8, TFL_Quint8, I1]>:$output);
}
3020
// Select has many instances in TF models where one or more of its operands
// are unranked. Therefore, we skip adding shape constraints here.
def TFL_SelectOp : TFL_Op<"select", [
  NoSideEffect,
  SameOperandsAndResultsScale,
  QuantizableResult,
  PredOpTrait<"operands have same element type", TFL_TCopVTEtAreSameAt<1, 2>>,
  PredOpTrait<"operands and result have same element type",
    TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
  let summary = "Select operator";

  let description = [{
    Select values of 'x' if the corresponding value of 'condition' is true or
    the value of 'y' if false. There are valid condition input sizes:

    1. Either the same shape (in which case the select is elementwise), or
    2. condition must be Rank 1 and match over the first dimension.
  }];

  let arguments = (ins
    TFL_BoolTensor:$condition,
    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$x,
    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$y);

  let results = (outs
    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);

  // TODO(jpienaar): autogenerate this.
  // The result type is taken from `x`; `y` is constrained to match by the
  // element-type traits above.
  let builders = [
    OpBuilder<(ins "Value":$condition, "Value":$x, "Value":$y),
    [{
    auto resultType = x.getType();
    $_state.addOperands({condition, x, y});
    $_state.types.push_back(resultType);
  }]>];

  let hasOptions = 1;
}
3059
// Like tfl.select, but all three operands may broadcast against each other.
def TFL_SelectV2Op : TFL_Op<"select_v2", [
    ResultsBroadcastableShape,
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1, 2], 5>,
    PredOpTrait<"operands have same element type", TFL_TCopVTEtAreSameAt<1, 2>>,
    PredOpTrait<"operands and result have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
  let summary = "SelectV2 operator";

  let description = [{
    Select values of 'x' if the corresponding value of 'condition' is true or
    the value of 'y' if false. There are valid condition input sizes:

    1. Either the same shape (in which case the select is elementwise), or
    2. Broadcastable shapes between 'condition', 'x' and 'y'.
  }];

  let arguments = (ins
    TFL_BoolTensor:$condition,
    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$x,
    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$y);

  let results = (outs
    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);

  // Result type computation (broadcasting) is delegated to a C++ helper.
  let builders = [
    OpBuilder<(ins "Value":$cond, "Value":$x, "Value":$y),
    [{
    BuildSelectV2Op(&$_builder, $_state, cond, x, y);
  }]>];

  let hasOptions = 1;
}
3095
// Element-wise sine; float-only.
def TFL_SinOp: TFL_Op<"sin", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Sine operator";

  let description = [{
    Computes element-wise Sine of input
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasFolder = 1;
}
3112
// Softmax with a `beta` temperature attribute; quantized output range is
// fixed (see FixedOutputRangeInterface implementation below).
def TFL_SoftmaxOp : TFL_Op<"softmax", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRankAtLeast<0, 1>,
    SameOperandsAndResultShape,
    QuantizableResult,
    FixedOutputRangeInterface,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Softmax operator";

  let description = [{
    Computes element-wise softmax activations with the following formula

      exp(input) / tf.reduce_sum(exp(input * beta), dim)
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8, QI16]>:$input,
    F32Attr:$beta
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8, QI16]>:$output);

  let hasOptions = 1;

  let extraClassDeclaration = [{
  // FixedOutputRangeInterface:
  // Softmax outputs lie in [0, 1], so the quantized output range is fixed
  // independent of calibration data.
  quant::UniformQuantizedType GetFixedOutputRange(
      bool is_signed, int bit_width) {
    auto result_type = output().getType();
    // zero_point = 0
    // scale = 1. / (max_value + 1)
    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
        /*scale=*/1.0 / 256, /*zero_point=*/-128);
  }
  }];
}
3151
// Element-wise square root; float-only.
def TFL_SqrtOp: TFL_Op<"sqrt", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Square root operator";

  let description = [{
    Computes element-wise Square root of input
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasFolder = 1;
}
3168
// Element-wise x*x; float-only.
def TFL_SquareOp: TFL_Op<"square", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType]> {
  let summary = "Square operator";

  let description = [{
    Computes element-wise Square of input
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  // Use the decimal bit literal for consistency with every other
  // `hasOptions` flag in this file (was `0b1`).
  let hasOptions = 1;

  let hasFolder = 1;
}
3187
// Element-wise subtraction with broadcasting and an optional fused
// activation function attribute.
def TFL_SubOp : TFL_Op<"sub", [
    ResultsBroadcastableShape,
    BinaryOpSameElementTypeConstraint,
    TFL_RuntimePredOpTrait<"Operands do not have valid shapes",
      CPred<"TFL::VerifySubOpShapeConstraints(llvm::cast<SubOp>($_op))">>,
    NoSideEffect,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Subtraction operator";

  let description = [{
    Element-wise subtraction operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$lhs,
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$rhs,
    TFL_AFAttr:$fused_activation_function);

  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$output);

  let hasFolder = 1;

  let builders = [TFL_FusedBroadcastableBinaryBuilder];

  // Custom printer/parser: prints the compact one-result,
  // same-operand-type form instead of the generic assembly.
  let hasCustomAssemblyFormat = 1;

  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];

  let hasOptions = 1;
}
3226
// Element-wise (lhs - rhs)^2 with broadcasting.
def TFL_SquaredDifferenceOp : TFL_Op<"squared_difference", [
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    BinaryOpSameElementTypeConstraint,
    TFL_SameFirstOperandAndFirstResultElementType,
    ResultsBroadcastableShape,
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Squared difference operator";

  let description = [{
    Element-wise squared difference operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, QI8]>:$lhs,
    TFL_TensorOf<[F32, I32, QI8]>:$rhs);

  let results = (outs TFL_TensorOf<[F32, I32, QI8]>:$output);

  let builders = [TFL_BroadcastableBinaryBuilder];

  // Custom printer/parser: prints the compact one-result,
  // same-operand-type form instead of the generic assembly.
  let hasCustomAssemblyFormat = 1;

  let extraClassDefinition = [{
    ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) {
      return parseOneResultSameOperandTypeOp(parser, result);
    }
    void $cppClass::print(OpAsmPrinter &p) {
      return printOneResultOp(getOperation(), p);
    }
  }];
}
3259
// Element-wise tanh; quantized output range is fixed to [-1, 1) (see
// FixedOutputRangeInterface implementation below).
def TFL_TanhOp: TFL_Op<"tanh", [
    NoSideEffect,
    SameOperandsAndResultShape,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    FixedOutputRangeInterface,
    QuantizableResult,
    DeclareOpInterfaceMethods<TFL_ArithmeticCount>]> {
  let summary = "Hyperbolic tangent operator";

  let description = [{
    Computes element-wise Hyperbolic tangent of input
  }];

  let arguments = (ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$input);

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$output);

  // This builder doesn't work with quantized type, so it can only be used by
  // non-quantization tablegen patterns. Currently, it is used by the
  // elementwise-move reordering pattern in the optimize_patterns.td
  let builders = [
    OpBuilder<(ins "Value":$input),
    [{
      $_state.addOperands({input});
      $_state.addTypes(input.getType());
    }]>
  ];

  let extraClassDeclaration = [{
  // FixedOutputRangeInterface:
  // Tanh outputs lie in [-1, 1], so the quantized output range is fixed
  // independent of calibration data.
  quant::UniformQuantizedType GetFixedOutputRange(
      bool is_signed, int bit_width) {
    auto result_type = output().getType();
    // central_value = min_value / 2 + (max_value - 1) / 2 + 1
    // zero_point = central_value
    // scale = 1. / (central_value - min_value)
    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
        /*scale=*/1.0 / 128, /*zero_point=*/0);
  }
  }];
}
3302
// Replicates the input `multiples[i]` times along each dimension i.
def TFL_TileOp: TFL_Op<"tile", [
    NoSideEffect,
    SameOperandsAndResultsScale,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Tile operator.";
  let description = [{
    Constructs a tensor by tiling a given tensor.

   This operation creates a new tensor by replicating input
   multiples times. The output tensor's i'th dimension has
   input.dims(i) * multiples[i] elements, and the values of input
   are replicated multiples[i] times along the 'i'th dimension.
   For example, tiling [a b c d] by [2] produces [a b c d a b c d].
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I1, I32, I64, UI8, QI8, QUI8, TFL_Str]>:$input,
    TFL_I32OrI64Tensor:$multiples);

  let results = (outs
    TFL_TensorOf<[F32, I1, I32, I64, UI8, QI8, QUI8, TFL_Str]>:$output);

  let hasOptions = 0;
}
3329
// TODO(jpienaar): Maybe make it accept any single element tensor as `k`.
// TODO(jpienaar): Check that input has one or more dimensions.
// TODO(jpienaar): Check that k is less or equal the internal dimension
// Returns the `k` largest values (and their indices) along the last
// dimension; `k` is a scalar operand.
def TFL_TopKV2Op: TFL_Op<"topk_v2", [
    QuantizableResult,
    NoSideEffect,
    TFL_OperandHasRankAtLeast<0, 1>,
    TFL_OperandHasRank<1, 0>,
    PredOpTrait<"result and input element type match",
      TFL_TCresVTEtIsSameAsOp<0,0>>,
    SameOperandsAndResultsScale]> {
  let summary = "TopK operator";

  let description = [{
    Returns the top `k` largest element along each last dimensional slice of
    `input` and the indices of values within the last dimension of the input
    tensor.

    Results are always sorted in the descending order.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I32, I64, UI8, QI8, QUI8]>:$input,
    TFL_I32Tensor:$k);

  let results = (outs
    TFL_TensorOf<[F32, I8, I32, I64, UI8, QI8, QUI8]>:$values,
    TFL_I32Tensor:$indices);

  // Result types are computed in a C++ helper from `input` and `k`.
  let builders = [
    OpBuilder<(ins "Value":$input, "Value":$k),
    [{ BuildTopKOp(&$_builder, $_state, input, k); }]>];

  let hasOptions = 1;
}
3365
// Permutes the input dimensions according to the 1-D `perm` operand; input
// rank is capped at 5 by the trait below.
def TFL_TransposeOp : TFL_Op<"transpose", [
    NoSideEffect,
    QuantizableResult,
    TFL_OperandHasRankAtMost<0, 5>,
    TFL_OperandHasRank<1, 1>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    SameOperandsAndResultsScale]> {
  let summary = "Transpose operator";

  let description = [{
    Returns the Transpose of x
  }];

  let arguments = (ins
    TFL_TensorOf<[I32, F32, I8, UI8, QI8, QUI8, TFL_Quint8, I1, I64, QI16]>:$input,
    TFL_TensorOf<[I32]>:$perm
  );

  let results = (outs
    TFL_TensorOf<[I32, F32, I8, UI8, QI8, QUI8, TFL_Quint8, I1, I64, QI16]>:$output
  );

  let hasVerifier = 1;

  let hasFolder = 1;

  // Result type is computed in a C++ helper from `input` and `perm`.
  let builders = [
    OpBuilder<(ins "Value":$input, "Value":$perm),
    [{ BuildTransposeOp(&$_builder, $_state, input, perm); }]>
  ];

  let extraClassDeclaration = [{
    // Quantized axes are verified in the Verify function.
    bool RequiredSameQuantizedAxes() { return false; }
  }];
}
3403
// Splits a rank-R tensor into `num` rank-(R-1) tensors along `axis`;
// inverse of tfl.pack.
def TFL_UnpackOp : TFL_Op<"unpack", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultElementType,
    SameOperandsAndResultsScale,
    DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
  let summary = "Unpacks a tensor along a dimension into multiple tensors";

  let description = [{
    Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.

    Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
    For example, given a tensor of shape `(A, B, C, D)`;

    If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
      and each tensor in `output` will have shape `(B, C, D)`. (Note that the
      dimension unpacked along is gone, unlike `split`).

    If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
      and each tensor in `output` will have shape `(A, C, D)`.
    Etc.

    This is the opposite of `pack`.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I1, I8, UI8, I32, QI8, QUI8, I16, QI16]>:$input,

    // `num` must be non-negative; `axis` may be negative (counted from the
    // end), hence the plain I32Attr.
    ConfinedAttr<I32Attr, [IntNonNegative]>:$num,
    I32Attr:$axis
  );

  let results = (outs
    TFL_VariadicTensorOf<[F32, I1, I8, UI8, I32, QI8, QUI8, I16, QI16]>:$outputs
  );

  let extraClassDeclaration = [{
    // Relaxed return-type compatibility used by the InferTypeOpInterface
    // implementation declared above.
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
  }];

  let hasOptions = 1;
}
3446
// Tensor of zeros with the same shape and element type as the input.
def TFL_ZerosLikeOp: TFL_Op<"zeros_like", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    SameOperandsAndResultShape,
    NoSideEffect]> {
  let summary = "ZerosLike operator";

  let description = [{
    Returns a tensor of zeros with the same shape and type as the input tensor.
  }];

  let arguments = (ins TFL_TensorOf<[I64, I32, F32]>:$input);

  let results = (outs TFL_TensorOf<[I64, I32, F32]>:$output);

  let hasOptions = 1;
}
3464
// Reshapes the batch dimension back into spatial dimensions; inverse of
// tfl.space_to_batch_nd.
def TFL_BatchToSpaceNdOp: TFL_Op<"batch_to_space_nd", [
    NoSideEffect,
    SameOperandsAndResultsScale,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRankRange<0, 3, 4>,
    TFL_OperandHasRank<1, 1>,
    TFL_OperandHasRank<2, 2>
  ]> {
  let summary = "BatchToSpaceNd operator";

  let description = [{
    This operation reshapes the "batch" dimension 0 into space dimensions.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I32, I64, UI8, QI8, QUI8]>:$input,
    TFL_TensorOf<[I32]>:$block_shape,
    TFL_TensorOf<[I32]>:$indices
  );

  // NOTE(review): the result allows I16 where the input allows I8; combined
  // with the same-element-type trait above, neither I8 nor I16 can actually
  // type-check end to end — confirm whether one of the lists has a typo.
  let results = (outs
    TFL_TensorOf<[F32, I16, I32, I64, UI8, QI8, QUI8]>:$output
  );
}
3491
// Zero-pads then reshapes spatial dimensions into the batch dimension;
// inverse of tfl.batch_to_space_nd.
def TFL_SpaceToBatchNdOp: TFL_Op<"space_to_batch_nd", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    TFL_OperandHasRankRange<0, 3, 4>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>
  ]> {
  let summary = "SpaceToBatchNd operator";

  let description = [{
    This operation reshapes space dimensions into the "batch" dimension 0
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
    TFL_I32Tensor:$block_shape,
    TFL_I32Tensor:$paddings
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output
  );
}
3516
// Moves height/width blocks of size `block_size` into the depth dimension.
def TFL_SpaceToDepthOp: TFL_Op<"space_to_depth", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRankAtMost<0, 4>
  ]> {
  let summary = "SpaceToDepth operator";

  let description = [{
    Rearranges blocks of spatial data, into depth. More specifically,
    this op outputs a copy of the input tensor where values from the `height`
    and `width` dimensions are moved to the `depth` dimension.
    `block_size` indicates the input block size.
   }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
    // Block size must be strictly positive.
    ConfinedAttr<I32Attr, [IntPositive]>:$block_size
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output
  );

  let hasOptions = 1;
}
3545
// Moves depth values into height/width blocks; inverse of tfl.space_to_depth.
def TFL_DepthToSpaceOp: TFL_Op<"depth_to_space", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRankAtMost<0, 4>
  ]> {
  let summary = "DepthToSpace operator";

  let description = [{
    Rearranges data from depth into blocks of spatial data.
    This is the reverse transformation of SpaceToDepth. More specifically,
    this op outputs a copy of the input tensor where values from the `depth`
    dimension are moved in spatial blocks to the `height` and `width`
    dimensions. The attr `block_size` indicates the input block size and how
    the data is moved.
   }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I32, I64, TFL_Quint8, UI8, QI8, QUI8]>:$input,
    // Block size must be strictly positive.
    ConfinedAttr<I32Attr, [IntPositive]>:$block_size
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I32, I64, TFL_Quint8, UI8, QI8, QUI8]>:$output
  );

  let hasOptions = 1;
}
3576
// Even split into `num_splits` pieces along the scalar `split_dim` axis.
def TFL_SplitOp : TFL_Op<"split", [
    NoSideEffect,
    TFL_Operand0DOr1ElementTensor<0>,
    QuantizableResult,
    SameOperandsAndResultsScale]> {
  let summary = "Splits a tensor into `num_split` tensors along one dimension.";

  let description = [{
    Splits the `value` tensor along `split_dim` into a number of sub-tensors
    with same shape as the original one, except for `split_dim`. Same as
    tf.Split.
  }];

  let arguments = (ins
    // Note: unlike tf.Split's attribute, the split axis is an operand here.
    TFL_TensorOf<[I32]>:$split_dim,
    TFL_TensorOf<[F32, I16, I32, I8, UI8, QI8, QUI8, QI16]>:$value,
    ConfinedAttr<I32Attr, [IntPositive]>:$num_splits
  );

  let results = (outs
    TFL_VariadicTensorOf<[F32, I16, I32, I8, UI8, QI8, QUI8, QI16]>:$outputs
  );

  let hasVerifier = 1;

  let hasOptions = 1;
}
3604
// Uneven split: piece sizes along `split_dim` come from the 1-D
// `size_splits` operand.
def TFL_SplitVOp : TFL_Op<"split_v", [
    NoSideEffect,
    QuantizableResult,
    SameOperandsAndResultsScale]> {
  let summary = "Splits a tensor into `num_split` tensors along one dimension.";

  let description = [{
    Splits the `value` tensor along `split_dim` into a number of sub-tensors
    with same shape as the original one, except for `split_dim`. The grouping
    of the resultant sub-tensors is decided by `size-splits`. Same as tf.SplitV.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I16, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$value,
    TFL_1DTensorOf<[I32], [I32]>:$size_splits,
    TFL_0DTensorOf<[I32], [I32]>:$split_dim,
    ConfinedAttr<I32Attr, [IntPositive]>:$num_splits
  );

  let results = (outs
    TFL_VariadicTensorOf<[F32, I16, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$outputs
  );

  let hasVerifier = 1;

  let hasOptions = 1;
}
3632
// Bilinear image resize on NHWC input (rank-4 enforced by trait) to the
// height/width given by the 1-D `size` operand.
def TFL_ResizeBilinearOp: TFL_Op<"resize_bilinear", [
    NoSideEffect,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRank<0, 4>,
    TFL_OperandHasRank<1, 1>,
    SameOperandsAndResultsScale]> {
  let summary = "ResizeBilinear Op";

  let description = [{
    Resize `images` to `size` using bilinear interpolation.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$input,
    TFL_I32Tensor:$size,
    BoolAttr:$align_corners,
    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
  );

  let results = (outs
    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$output
  );

  let hasOptions = 1;
}
3660
// Nearest-neighbor resize of a rank-4 `input` to the spatial size given by
// the 1-D i32 `size` operand; mirrors TFL_ResizeBilinearOp's structure.
def TFL_ResizeNearestNeighborOp : TFL_Op<"resize_nearest_neighbor", [
    NoSideEffect,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRank<0, 4>,
    TFL_OperandHasRank<1, 1>,
    SameOperandsAndResultsScale]> {
  let summary = "ResizeNearestNeighbor Op";

  let description = [{
    Resize `images` to `size` using nearest neighbor interpolation.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$input,
    // Target output size; rank constrained to 1 by the trait above.
    TFL_I32Tensor:$size,
    BoolAttr:$align_corners,
    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
  );

  let results = (outs
    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$output
  );

  let hasOptions = 1;
}
3688
// Scatters `sparse_values` at `sparse_indices` into a dense tensor of shape
// `output_shape`, filling all other elements with `default_value`. The last
// predicate restricts a statically-shaped rank-2 `sparse_indices` to at most
// 4 index columns (i.e. at most a 4-D dense output) for the TFLite kernel.
def TFL_SparseToDenseOp : TFL_Op<"sparse_to_dense", [
    NoSideEffect,
    QuantizableResult,
    PredOpTrait<"sparse_values and dense must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 2>>,
    PredOpTrait<"default_value and dense must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 3>>,
    TFL_OperandHasRankAtMost<0, 2>,
    TFL_OperandHasRankAtMost<1, 1>,
    TFL_OperandHasRankAtMost<2, 1>,
    PredOpTrait<"the first operand should have a rank <= 2, when its rank is 2 and has static shape, the second dim should be <= 4",
      Or<[TFL_OperandIsUnrankedPred<0>,
          CPred<"$_op.getOperand(0).getType().cast<ShapedType>().getRank() <= 1">,
          CPred<"$_op.getOperand(0).getType().cast<ShapedType>().getRank() == 2 && !$_op.getOperand(0).getType().cast<ShapedType>().hasStaticShape()">,
          CPred<"$_op.getOperand(0).getType().cast<ShapedType>().getRank() == 2 && $_op.getOperand(0).getType().cast<ShapedType>().getShape()[1] <= 4">]>>]> {
  let summary = "Converts a sparse representation into a dense tensor.";

  let description = [{
Builds an array `dense` with shape `output_shape` such that

```
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

All other values in `dense` are set to `default_value`.  If `sparse_values` is a
scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is true, these properties
are checked during execution.
  }];

  let arguments = (ins
    TFL_I32OrI64Tensor:$sparse_indices,
    TFL_I32OrI64Tensor:$output_shape,
    TFL_TensorOf<[I32, I64, I8, QI8, UI8, QUI8, TFL_Quint8, F32]>:$sparse_values,
    TFL_TensorOf<[I32, I64, I8, QI8, UI8, QUI8, TFL_Quint8, F32]>:$default_value
  );

  let results = (outs
    TFL_TensorOf<[I32, I64, I8, QI8, UI8, QUI8, TFL_Quint8, F32]>:$dense
  );
}
3739
// Extracts a strided slice of `input`, controlled by the 1-D i32
// `begin`/`end`/`strides` operands and the *_mask bitfield attributes,
// mirroring tf.strided_slice.
def TFL_StridedSliceOp: TFL_Op<"strided_slice", [
    NoSideEffect,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    SameOperandsAndResultsScale,
    TFL_OperandHasRankAtMost<0, 5>,
    TFL_OperandHasRank<1, 1>,
    TFL_OperandHasRank<2, 1>,
    TFL_OperandHasRank<3, 1>
  ]> {
  let summary = "StridedSlice Op";

  let description = [{
    Return a strided slice from `input`.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8, I1, I16, QI16, TFL_Quint8, TFL_Str]>:$input,
    TFL_I32Tensor:$begin,
    TFL_I32Tensor:$end,
    TFL_I32Tensor:$strides,

    // Bitmasks with tf.strided_slice semantics; one bit per slice dimension.
    I32Attr:$begin_mask,
    I32Attr:$end_mask,
    I32Attr:$ellipsis_mask,
    I32Attr:$new_axis_mask,
    I32Attr:$shrink_axis_mask
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8, I1, I16, QI16, TFL_Quint8, TFL_Str]>:$output
  );

  // TFLite kernel only supports up to 5D input including added axis.
  let hasVerifier = 1;

  let hasOptions = 1;

  let hasFolder = 1;
}
3781
3782// If there is a change in supporting more types in the TFLite cast op kernel,
3783// the While loop outline pass should be updated since it inserts cast op(s)
3784// after the TF -> TFL legalization pass is done.
3785// LINT.IfChange
// Element-wise type cast. Input and output must have the same shape; the
// destination type is derived from the result tensor (no CastOptions).
def TFL_CastOp : TFL_Op<"cast", [
    NoSideEffect,
    SameOperandsAndResultShape]> {
  let summary = "Cast operator";

  let description = [{
    Casts input from input type to output type.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I1, I16, UI16, I32, UI32, I64, TFL_Quint8, UI8, I8, Complex<F<32>>]>:$input
  );

  let results = (outs TFL_TensorOf<[F32, I1, I16, UI16, I32, UI32, I64, TFL_Quint8, UI8, I8, Complex<F<32>>]>:$output);

  // TFLite's cast op does not utilize CastOptions, instead derives types
  // from the TfLiteTensors.
  let hasOptions = 0;

  let hasFolder = 1;
}
3807// LINT.ThenChange(//tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc)
3808
// Pads `input` with mirrored values; the reflect/symmetric behavior is
// selected by the `mode` attribute.
def TFL_MirrorPadOp: TFL_Op<"mirror_pad", [
                     SameOperandsAndResultsScale,
                     QuantizableResult,
                     NoSideEffect,
                     TFL_OperandHasRank<1, 2>]> {
  let summary = "MirrorPad Operator. Pads a tensor with mirrored values.";

  let description = [{
    This operation pads a input with mirrored values according to the paddings
    you specify. paddings is an integer tensor with shape [n, 2],
    where n is the rank of input.
    For each dimension D of input, paddings[D, 0] indicates how many values
    to add before the contents of input in that dimension,
    and paddings[D, 1] indicates how many values to add after the contents of
    input in that dimension.

    Both paddings[D, 0] and paddings[D, 1] must be no greater than
    input.dim_size(D) (or input.dim_size(D) - 1)
    if copy_border is true (if false, respectively).

    The padded size of each dimension D of the output is:

    paddings(D, 0) + input.dim_size(D) + paddings(D, 1)
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8]>:$input,
    // Paddings tensor of shape [n, 2]; rank constrained to 2 by the trait above.
    TFL_TensorOf<[I32, I64]>:$pad,
    TFL_MirrorPaddingAttr:$mode
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8]>:$output
  );

  let hasOptions = 1;
}
3846
// Returns the unique elements of the 1-D `input` plus an index tensor `idx`
// mapping each input element to its position in `output`.
// NOTE(review): the description below ends at "In other words:" with no
// continuation — it appears truncated relative to the tf.unique docs; confirm
// and complete upstream.
def TFL_UniqueOp: TFL_Op<"unique", [
    TFL_OperandHasRank<0, 1>,
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Unique Op.";

  let description = [{
  This operation returns a tensor `output` containing all of the unique elements
of `input` sorted in the same order that they occur in `input`. This operation
also returns a tensor `idx` the same size as `x` that contains the index of each
value of `input` in the unique output `output`. In other words:
  }];

  let arguments = (ins
    TFL_TensorOf<[I8, QI8, UI8, QUI8, I16, QI16, I32, I64, F32]>:$input
  );

  let results = (outs
    TFL_TensorOf<[I8, QI8, UI8, QUI8, I16, QI16, I32, I64, F32]>:$output,
    TFL_I32OrI64Tensor:$idx
  );

  // Flatbuffer type of `idx`: INT64 when the result element width exceeds 32
  // bits, INT32 otherwise.
  DerivedTFLiteTypeAttr idx_out_type = DerivedTFLiteTypeAttr<[{
    return getResult(1).getType().cast<TensorType>().getElementType().
        cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
            tflite::TensorType_INT32;
    }], [{
      TypeAttr::get(getResult(1).getType().cast<TensorType>().getElementType())
    }]>;

  let hasOptions = 1;
}
3879
// Element-wise GELU activation; `approximate` selects the tanh-based
// approximation variant carried in the op options.
def TFL_GeluOp: TFL_Op<"gelu", [
    NoSideEffect,
    SameOperandsAndResultShape,
    QuantizableResult,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "GELU activation function.";
  let description = [{
    Computes GELU activation function element-wise.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, QI8, QUI8]>:$input,
    DefaultValuedAttr<BoolAttr, "false">:$approximate
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8]>:$output);

  let hasOptions = 1;
}
3900
// XLA-style DynamicUpdateSlice: the result is `operand` with the `update`
// slice overwritten starting at `start_indices`.
def TFL_DynamicUpdateSliceOp: TFL_Op<"dynamic_update_slice", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "DynamicUpdateSlice.";
  let description = [{
    DynamicUpdateSlice op that have the same semantics with XLA
    DynamicUpdateSlice.
    Generates a result which is the value of the input array
    operand, with a slice update overwritten at start_indices.

    See https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice.
  }];

  let arguments = (ins
    TFL_TensorOf<[I1, I8, I32, I64, F32]>:$operand,
    TFL_TensorOf<[I1, I8, I32, I64, F32]>:$update,
    TFL_I32Tensor:$start_indices
  );

  let results = (
    outs TFL_TensorOf<[I1, I8, I32, I64, F32]>:$output);
}
3924
3925//===----------------------------------------------------------------------===//
3926// Quantization ops.
3927//===----------------------------------------------------------------------===//
// Converts a quantized (or f16) tensor to a floating-point tensor using the
// quantization parameters of the input element type.
def TFL_DequantizeOp: TFL_Op<"dequantize", []> {
  let summary = "Dequantize operator";

  let description = [{
    Converts quantized array of integers to floating-points according to the
    quantization parameters.
  }];

  let arguments = (ins TFL_TensorOf<[QI8, QUI8, QI16, F16]>:$input);

  let results = (outs TFL_FpTensor:$output);
}
3940
// Fake-quantizes a float tensor to the [min, max] range with `num_bits` of
// resolution, producing a float output of the same shape.
def TFL_FakeQuantOp : TFL_Op<"fake_quant", [
    NoSideEffect,
    QuantizableResult]> {
  let summary = "FakeQuant operator";

  let description = [{
    Fake-quantize the 'inputs' tensor of type float via float scalars min and
    max to 'outputs' tensor of same shape as inputs.
  }];

  let arguments = (
    ins TFL_FpTensor:$input,
    // The expected [min, max] range of values.
    F32Attr:$min,
    F32Attr:$max,

    // The bitwidth of the quantization; between 2 and 16, inclusive.
    ConfinedAttr<I32Attr, [IntMinValue<2>, IntMaxValue<16>]>:$num_bits,
    // Quantization range starts from 0 or 1; starts from 1 if true.
    ConfinedAttr<BoolAttr, [TFL_BoolFalse]>:$narrow_range);

  let results = (outs TFL_FpTensor:$output);

  // Decimal literal for consistency with the rest of this file; the original
  // `0b1` was the same value written as a binary literal.
  let hasCanonicalizer = 1;

  let hasOptions = 1;
}
3968
3969// TODO(b/200841823): consider adding TFL_RuntimeVerification trait.
// Quantized constant pseudo op; the result type is derived from the `qtype`
// attribute (FirstAttrDerivedResultType), not from `value`.
def TFL_QConstOp : Op<TFL_Dialect, "pseudo_qconst", [
    NoSideEffect,
    FirstAttrDerivedResultType]> {
  let summary = "Quantized constant pseudo op";

  let description = [{
    Represents a quantized constant value in TensorFlow Lite dialect. This is
    not an actual operation and it will be lowered to buffer instead. The
    quantization parameters are stored as a type attribute in this constant.
  }];

  let arguments = (
    ins TensorTypeAttr:$qtype,
    ElementsAttr:$value
  );

  let results = (outs TFL_TensorOf<[QUI8, QI8, QI16, QUI16, TFL_Quint8]>:$output);

  let builders = [
    // Convenience builder: sets both attributes and derives the result type
    // from the wrapped `qtype`.
    OpBuilder<(ins "TypeAttr":$qtype, "Attribute":$value),
    [{
      $_state.addAttribute("qtype", qtype);
      $_state.addAttribute("value", value);
      $_state.addTypes(qtype.getValue());
    }]>
  ];
}
3997
// Sparse variant of TFL_QConstOp: additionally carries the sparsity layout
// (`s_param`) and the compressed payload (`compressed_data`).
def TFL_SparseQConstOp : Op<TFL_Dialect, "pseudo_sparse_qconst", [
    NoSideEffect,
    FirstAttrDerivedResultType,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "Sparse quantized constant pseudo op";

  let description = [{
    Represents a sparse quantized constant value in TensorFlow Lite dialect.
    This is not an actual operation and it will be lowered to buffer instead.
    The quantization parameters are stored as a type attribute in this constant.
  }];

  let arguments = (
    ins TensorTypeAttr:$qtype,
    ElementsAttr:$value,
    SparsityParameterAttr:$s_param,
    ElementsAttr:$compressed_data
  );

  let results = (outs TFL_TensorOf<[QUI8, QI8, QI16, QUI16, TFL_Quint8]>:$output);

  let builders = [
    // Convenience builder: sets all four attributes and derives the result
    // type from the wrapped `qtype`.
    OpBuilder<(ins "TypeAttr":$qtype, "Attribute":$value,
      "SparsityParameterAttr":$s_param, "Attribute":$compressed_data),
    [{
      $_state.addTypes(qtype.getValue());
      $_state.addAttribute("qtype", qtype);
      $_state.addAttribute("value", value);
      $_state.addAttribute("s_param", s_param);
      $_state.addAttribute("compressed_data", compressed_data);
    }]>
  ];
}
4031
// Quantizes a float tensor (or requantizes an already-quantized tensor) to
// the type in the `qtype` attribute, which also determines the result type
// (FirstAttrDerivedResultType).
def TFL_QuantizeOp: TFL_Op<"quantize", [
    FirstAttrDerivedResultType,
    SameOperandsAndResultShape]> {
  let summary = "Quantize operator";

  let description = [{
    Converts floating point tensors to quantized integer tensors according to
    the quantization parameters defined in the type attribute.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$input,
    TensorTypeAttr:$qtype
  );

  let results = (outs TFL_TensorOf<[QI8, QUI8, QI16, TFL_Quint8]>:$output);
}
4049
// Converts a sparse-format tensor to dense format; element type is preserved.
def TFL_DensifyOp: TFL_Op<"densify", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Densify operator";

  let description = [{
    Converts sparse tensor to dense format.
  }];

  let arguments = (ins TFL_TensorOf<[F32, I8]>:$input);

  let results = (outs TFL_TensorOf<[F32, I8]>:$output);
}
4064
4065//===----------------------------------------------------------------------===//
4066// LSTM Ops
4067//===----------------------------------------------------------------------===//
4068
// Mandatory LSTM operands must share one element type. Only enforced for F32
// inputs: the Neg<> arm makes the constraint vacuously true otherwise.
def LstmMandatoryInputsConstraint : PredOpTrait<
  "mandatory operands element types should match",
  // TODO(ashwinm): Replace the indices with input tensor names when that
  // support is available.
  Or<[
    TCopVTEtAreSameAt<[0, 2, 3, 4, 6, 7, 8, 13, 14, 15, 18, 19]>,
    Neg<TypeIsPred<"input", F32>>]>>;
4076
// Peephole weights (operands 9, 10, 11) must agree in element type with the
// projection weights (16); operand 9 is exempt for the CIFG variant, where
// the input gate (and hence cell_to_input_weights) is absent.
def LstmOptionalPeepholeWeightConstraint : PredOpTrait<
  "the optional peephole weights should all be specified or none",
  // Ignore input 9 (cell_to_input_weights) for LSTM with CIFG.
  And<[
    TFL_TCopVTEtAreSameAt<10, 11, 16>,
    Or<[TFL_TCopVTEtAreSameAt<9, 10, 16>,
        And<[TypeIsPred<"input_to_input_weights", NoneType>,
             TypeIsPred<"cell_to_input_weights", NoneType>]>]>]>>;
4085
// Projection bias may only appear together with projection weights: either
// both are none, or the weights are present (bias then unconstrained).
def LstmProjectionWeightBiasConstraint : PredOpTrait<
  "either projection weight must be specified or both projection weight and "
  "projection bias must not be specified",
   Or<[
      And<[TypeIsPred<"projection_weights", NoneType>,
           TypeIsPred<"projection_bias", NoneType>]>,
      Neg<TypeIsPred<"projection_weights", NoneType>>]>>;
4093
// CIFG (coupled input/forget gate) all-or-nothing rule on the input-gate
// operands; see the inline comment for the operand indices involved.
def LstmCifgInputConstraint : PredOpTrait<
  "the cifg inputs should all be specified or none",
   // If LSTM has combined input/forget gate, input 1, 5, 9, 12, 20 are all none
   // or 1, 5, 12 should not be none. Inputs 9 and 20 depend on LSTM's variants.
   Or<[
     And<[TypeIsPred<"input_to_input_weights", NoneType>,
          TypeIsPred<"recurrent_to_input_weights", NoneType>,
          TypeIsPred<"cell_to_input_weights", NoneType>,
          TypeIsPred<"input_gate_bias", NoneType>,
          TypeIsPred<"input_layer_norm_coefficients", NoneType>]>,
     Neg<Or<[
       TypeIsPred<"input_to_input_weights", NoneType>,
       TypeIsPred<"recurrent_to_input_weights", NoneType>,
       TypeIsPred<"input_gate_bias", NoneType>]>>]>>;
4108
4109
4110// TODO(b/137798843): Need to add an additional constraint for both LSTM and
4111// UnidirectionalSequenceLstm
4112// For layer norm: if layer norm is false, tensor {20, 21, 22, 23}
4113// are null; if layer norm is true, tensors {21, 22, 23} are not null; tensor
4114// {20} is not null if additionally cifg = false.
4115
// The first result's element type must match the first operand's.
def LstmResultConstraint : PredOpTrait<
  "the input and result tensor elemental types must be same",
  TFL_TCresVTEtIsSameAsOp<0, 0>>;
4119
4120// This is the basic kernel type LSTM op.
4121// TODO(b/142417845): Refactor this part to return its tflite node name as
4122// "lstm".
// BASIC-kernel LSTM cell: all five tensor operands are rank-constrained by
// the traits below, and `kernel_type` is pinned to BASIC.
def TFL_BasicLSTMOp : TFL_Op<"basic_lstm", [NoSideEffect,
    TFL_OperandHasRank<0, 2>,
    TFL_OperandHasRank<1, 2>,
    TFL_OperandHasRank<2, 2>,
    TFL_OperandHasRank<3, 1>,
    TFL_OperandHasRank<4, 2>,
    QuantizableResult]> {
  let summary = "The basic lstm operator";

  let description = [{
    basic LSTM Cell Operator.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QUI8]>:$data_input,
    TFL_TensorOf<[F32, QUI8]>:$prev_activ_input,
    TFL_TensorOf<[F32, QUI8]>:$weights_input,
    TFL_TensorOf<[F32, QI32]>:$biases_input,
    TFL_TensorOf<[F32, QI16]>:$prev_state_input,

    // Attributes
    DefaultValuedStrAttr<TFL_AFAttr, "TANH">:$fused_activation_function,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
    // Since this op is the BASIC kernel only, constrain it.
    ConfinedAttr<
      DefaultValuedAttr<TFL_LSTMKernelTypeAttr, "mlir::TFL::LSTMKernelType::BASIC">,
      [TFL_LSTMKernelTypeEqualsTo<"mlir::TFL::LSTMKernelType::BASIC">]>:$kernel_type
  );

  let hasOptions = 1;

  // Besides the activation output and new state, the BASIC kernel also
  // exposes two temporary tensors (`concat_temp`, `activ_temp`) as results.
  let results = (outs TFL_2DTensorOf<[F32, QUI8]>:$activ_output,
                      TFL_2DTensorOf<[F32, QUI16]>:$state_output,
                      TFL_2DTensorOf<[F32, QUI8]>:$concat_temp,
                      TFL_2DTensorOf<[F32, QUI16]>:$activ_temp);
}
4160
4161// This is the FULL kernel type LSTM op.
def TFL_LSTMOp :
  TFL_Op<"lstm",
          [LstmMandatoryInputsConstraint,
           LstmOptionalPeepholeWeightConstraint,
           LstmProjectionWeightBiasConstraint,
           LstmCifgInputConstraint,
           LstmResultConstraint,
           TFL_OperandHasRank<2, 2>,           // input_to_forget_weights
           TFL_OperandHasRank<3, 2>,           // input_to_cell_weights
           TFL_OperandIsNoneOrHasRank<5, 2>,   // recurrent_to_input_weights
           TFL_OperandHasRank<6, 2>,           // recurrent_to_forget_weights
           TFL_OperandHasRank<7, 2>,           // recurrent_to_cell_weights
           TFL_OperandIsNoneOrHasRank<9, 1>,   // cell_to_input_weights
           TFL_OperandIsNoneOrHasRank<10, 1>,  // cell_to_forget_weights
           TFL_OperandIsNoneOrHasRank<11, 1>,  // cell_to_output_weights
           TFL_OperandHasRank<13, 1>,          // forget_gate_bias
           TFL_OperandHasRank<14, 1>,          // cell_gate_bias
           TFL_OperandHasRank<15, 1>,          // output_gate_bias
           TFL_OperandIsNoneOrHasRank<16, 2>,  // projection_weights
           TFL_OperandIsNoneOrHasRank<17, 1>,  // projection_bias
           TFL_StatefulOp,
           QuantizableResult,
           DynamicRangeQuantizedOpInterface]> {
  let summary = "The full lstm operator";

  let description = [{
Long short-term memory unit (LSTM) recurrent network layer.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber. 'Long Short-Term Memory'. Neural Computation,
9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays. 'Long short-term memory
recurrent neural network architectures for large scale acoustic modeling.'
INTERSPEECH, 2014.
The coupling of input and forget gate (CIFG) is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. 'LSTM: A Search Space Odyssey'
The layer normalization is based on:
https://arxiv.org/pdf/1607.06450.pdf
Ba et al. 'Layer Normalization'
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8]>:$input,

    // Weights
    TFL_TensorOfOrNone<[F32, QI8]>:$input_to_input_weights,
    TFL_TensorOf<[F32, QI8]>:$input_to_forget_weights,
    TFL_TensorOf<[F32, QI8]>:$input_to_cell_weights,
    TFL_TensorOf<[F32, QI8]>:$input_to_output_weights,

    // Recurrent weights
    TFL_TensorOfOrNone<[F32, QI8]>:$recurrent_to_input_weights,
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_forget_weights,
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_cell_weights,
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_output_weights,

    // Cell weights
    TFL_TensorOfOrNone<[F32, QI8, QI16]>:$cell_to_input_weights,
    // Optional input
    TFL_TensorOfOrNone<[F32, QI8, QI16]>:$cell_to_forget_weights,
    // Optional input
    TFL_TensorOfOrNone<[F32, QI8, QI16]>:$cell_to_output_weights,

    // Bias
    TFL_TensorOfOrNone<[F32, QI32]>:$input_gate_bias,
    TFL_TensorOf<[F32, QI32]>:$forget_gate_bias,
    TFL_TensorOf<[F32, QI32]>:$cell_bias,
    TFL_TensorOf<[F32, QI32]>:$output_gate_bias,

    // Projection weight and bias
    TFL_TensorOfOrNone<[F32, QI8]>:$projection_weights,
    // Optional input
    TFL_TensorOfOrNone<[F32, QI32]>:$projection_bias,

    // Stateful activation and cell states.
    // These are operands 18 and 19; see GetStatefulOperands() below.
    TFL_StatefulTensor:$input_activation_state,
    TFL_StatefulTensor:$input_cell_state,

    // Layer norm coefficients
    TFL_TensorOfOrNone<[F32, QI16]>:$input_layer_norm_coefficients,
    TFL_TensorOfOrNone<[F32, QI16]>:$forget_layer_norm_coefficients,
    TFL_TensorOfOrNone<[F32, QI16]>:$cell_layer_norm_coefficients,
    TFL_TensorOfOrNone<[F32, QI16]>:$output_layer_norm_coefficients,

    // Attributes
    TFL_AFAttr:$fused_activation_function,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
    // Since this op is the FULL kernel only, constrain it.
    ConfinedAttr<
      DefaultValuedAttr<TFL_LSTMKernelTypeAttr, "mlir::TFL::LSTMKernelType::FULL">,
      [TFL_LSTMKernelTypeEqualsTo<"mlir::TFL::LSTMKernelType::FULL">]>:$kernel_type,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs,

    // Types of the optional intermediate tensors, which exist for fully
    // quantized LSTM op and hold the ranges of the intermediate tensors.
    // The type for intermediate tensors are be quant.calibrated when imported
    // to only store calibrated min, max values. The proper quantization spec is
    // determined while going through quantization passes.
    OptionalAttr<TypeAttr>:$input_to_input_intermediate,
    OptionalAttr<TypeAttr>:$input_to_forget_intermediate,
    OptionalAttr<TypeAttr>:$input_to_cell_intermediate,
    OptionalAttr<TypeAttr>:$input_to_output_intermediate,
    OptionalAttr<TypeAttr>:$effective_hidden_scale_intermediate
  );

  // Single output; element type must match `input` (LstmResultConstraint).
  let results = (outs AnyTensor:$output);

  // TODO(fengliuai): customize printer and parser to not display
  // empty region.
  let regions = (region AnyRegion:$internal);

  let hasOptions = 1;

  let hasCanonicalizer = 1;

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // StatefulOpInterface:
    std::vector<int> GetStatefulOperands() { return {18, 19}; }
    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() {
      return {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16};
    }
  }];
}
4296
4297// UnidirectionalSequenceLstm op.
4298// TODO(ashwinm): Add constraint to validate the combination of operands
4299// that are valid for hybrid vs fully quantized vs float only semantics
def TFL_UnidirectionalSequenceLSTMOp :
  TFL_Op<"unidirectional_sequence_lstm",
          [LstmMandatoryInputsConstraint,
           LstmOptionalPeepholeWeightConstraint,
           LstmProjectionWeightBiasConstraint,
           LstmCifgInputConstraint,
           LstmResultConstraint,
           TFL_OperandHasRankAtLeast<0, 2>,    // input
           TFL_OperandIsNoneOrHasRank<1, 2>,   // input_to_input_weights
           TFL_OperandHasRank<2, 2>,           // input_to_forget_weights
           TFL_OperandHasRank<3, 2>,           // input_to_cell_weights
           TFL_OperandHasRank<4, 2>,           // input_to_output_weights
           TFL_OperandIsNoneOrHasRank<5, 2>,   // recurrent_to_input_weights
           TFL_OperandHasRank<6, 2>,           // recurrent_to_forget_weights
           TFL_OperandHasRank<7, 2>,           // recurrent_to_cell_weights
           TFL_OperandHasRank<8, 2>,           // recurrent_to_output_weights
           TFL_OperandIsNoneOrHasRank<9, 1>,   // cell_to_input_weights
           TFL_OperandIsNoneOrHasRank<10, 1>,  // cell_to_forget_weights
           TFL_OperandIsNoneOrHasRank<11, 1>,  // cell_to_output_weights
           TFL_OperandIsNoneOrHasRank<12, 1>,  // input_gate_bias
           TFL_OperandHasRank<13, 1>,          // forget_gate_bias
           TFL_OperandHasRank<14, 1>,          // cell_gate_bias
           TFL_OperandHasRank<15, 1>,          // output_gate_bias
           TFL_OperandIsNoneOrHasRank<16, 2>,  // projection_weights
           TFL_OperandIsNoneOrHasRank<17, 1>,  // projection_bias
           TFL_StatefulOp,
           DeclareOpInterfaceMethods<InferTypeOpInterface>,
           QuantizableResult,
           DynamicRangeQuantizedOpInterface
          ]> {
  let summary = "Unidirectional sequence lstm operator";

  let description = [{
    A recurrent neural network specified by an LSTM cell. This Op supports
    unrolling the input along the time or batch dimensions, and
    implements the following operation for
    each element in the sequence s = 1...sequence_length:
      outputs[s] = state = activation(LSTMOp(inputs[s]))

    where LSTMOp is LSTM TF Lite Op and the “activation” is the function passed
    as the “fused_activation_function” argument (if not “NONE”).
  }];

  let arguments = (
    ins TFL_FpTensor:$input,

    // Weights
    TFL_TensorOfOrNone<[F32, QI8]>:$input_to_input_weights,
    TFL_TensorOf<[F32, QI8]>:$input_to_forget_weights,
    TFL_TensorOf<[F32, QI8]>:$input_to_cell_weights,
    TFL_TensorOf<[F32, QI8]>:$input_to_output_weights,

    // Recurrent weights
    TFL_TensorOfOrNone<[F32, QI8]>:$recurrent_to_input_weights,
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_forget_weights,
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_cell_weights,
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_output_weights,

    // Cell weights
    TFL_TensorOfOrNone<[F32, QI8]>:$cell_to_input_weights,
    // Optional input
    TFL_TensorOfOrNone<[F32, QI8]>:$cell_to_forget_weights,
    // Optional input
    TFL_TensorOfOrNone<[F32, QI8]>:$cell_to_output_weights,

    // Bias
    TFL_TensorOfOrNone<[F32]>:$input_gate_bias,
    TFL_FpTensor:$forget_gate_bias,
    TFL_FpTensor:$cell_bias,
    TFL_FpTensor:$output_gate_bias,

    // Projection weight and bias
    TFL_TensorOfOrNone<[F32, QI8]>:$projection_weights,
    // Optional input
    TFL_TensorOfOrNone<[F32]>:$projection_bias,

    // Stateful activation and cell states.
    // These are operands 18 and 19; see GetStatefulOperands() below.
    TFL_StatefulTensor:$input_activation_state,
    TFL_StatefulTensor:$input_cell_state,

    // Layer norm coefficients
    TFL_TensorOfOrNone<[F32, QI8]>:$input_layer_norm_coefficients,
    TFL_TensorOfOrNone<[F32, QI8]>:$forget_layer_norm_coefficients,
    TFL_TensorOfOrNone<[F32, QI8]>:$cell_layer_norm_coefficients,
    TFL_TensorOfOrNone<[F32, QI8]>:$output_layer_norm_coefficients,

    // Attributes
    TFL_AFAttr:$fused_activation_function,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
    // If true, the input is unrolled along the time (first) dimension;
    // otherwise along the batch dimension.
    BoolAttr:$time_major,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs,

    // Types of the optional intermediate tensors, which exist for fully
    // quantized op and hold the ranges of the intermediate tensors.
    // The type for intermediate tensors are be quant.calibrated when imported
    // to only store calibrated min, max values. The proper quantization spec is
    // determined while going through quantization passes.
    OptionalAttr<TypeAttr>:$input_to_input_intermediate,
    OptionalAttr<TypeAttr>:$input_to_forget_intermediate,
    OptionalAttr<TypeAttr>:$input_to_cell_intermediate,
    OptionalAttr<TypeAttr>:$input_to_output_intermediate,
    OptionalAttr<TypeAttr>:$effective_hidden_scale_intermediate
  );

  let results = (outs TFL_TensorOf<[F32, QI8]>:$output);

  let hasOptions = 1;

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // StatefulOpInterface:
    std::vector<int> GetStatefulOperands() { return {18, 19}; }

    // Compatiable return types check
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);

    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() {
      return {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16};
    }
  }];
}
4428
// Mandatory forward and backward operands must share one element type. Only
// enforced for F32 inputs: the Neg<> arm makes it vacuously true otherwise.
def BidiLstmMandatoryInputsConstraint : PredOpTrait<
  "mandatory operands element types should match",
  // TODO(ashwinm): Replace the indices with input tensor names when that
  // support is available.
  Or<[
    TCopVTEtAreSameAt<[0, 2, 3, 4, 6, 7, 8, 13, 14, 15, 19, 20, 21, 23, 24, 25,
                       30, 31, 32, 35, 36, 37, 38]>,
    Neg<TypeIsPred<"input", F32>>]>>;
4437
// TODO(b/172517537): support quantized types
// The six optional peephole weights (fw/bw cell_to_{input,forget,output},
// operand indices 9-11 and 26-28) must share one element type, so they are
// either all present or all None.
def BidiLstmOptionalPeepholeWeightConstraint : PredOpTrait<
  "the optional peephole weights should all be specified or none",
  TCopVTEtAreSameAt<[9, 10, 11, 26, 27, 28]>>;
4442
// Projection bias may only appear together with projection weights: either
// all four fw/bw projection operands are None, or both fw and bw projection
// weights are non-None (the biases then stay optional).
def BidiLstmProjectionWeightBiasConstraint : PredOpTrait<
  "either projection weight must be specified or both projection weight and "
  "projection bias must not be specified",
   Or<[
      And<[TypeIsPred<"fw_projection_weights", NoneType>,
           TypeIsPred<"fw_projection_bias", NoneType>,
           TypeIsPred<"bw_projection_weights", NoneType>,
           TypeIsPred<"bw_projection_bias", NoneType>]>,
      And<[
        Neg<TypeIsPred<"fw_projection_weights", NoneType>>,
        Neg<TypeIsPred<"bw_projection_weights", NoneType>>,
     ]>
   ]>>;
4456
// BidirectionalSequenceLstm op.
// TODO(ashwinm): Add constraint to validate the combination of operands
// that are valid for hybrid vs fully quantized vs float only semantics
def TFL_BidirectionalSequenceLSTMOp :
  TFL_Op<"bidirectional_sequence_lstm",
          [BidiLstmMandatoryInputsConstraint,
           BidiLstmOptionalPeepholeWeightConstraint,
           BidiLstmProjectionWeightBiasConstraint,
           LstmResultConstraint,
           TFL_OperandHasRank<0, 3>,   // input
           TFL_OperandHasRank<1, 2>,   // fw_input_to_input_weights
           TFL_OperandHasRank<2, 2>,   // fw_input_to_forget_weights
           TFL_OperandHasRank<3, 2>,   // fw_input_to_cell_weights
           TFL_OperandHasRank<4, 2>,   // fw_input_to_output_weights
           TFL_OperandHasRank<5, 2>,   // fw_recurrent_to_input_weights
           TFL_OperandHasRank<6, 2>,   // fw_recurrent_to_forget_weights
           TFL_OperandHasRank<7, 2>,   // fw_recurrent_to_cell_weights
           TFL_OperandHasRank<8, 2>,   // fw_recurrent_to_output_weights
           TFL_OperandHasRank<9, 1>,   // fw_cell_to_input_weights
           TFL_OperandHasRank<10, 1>,  // fw_cell_to_forget_weights
           TFL_OperandHasRank<11, 1>,  // fw_cell_to_output_weights
           TFL_OperandHasRank<12, 1>,  // fw_input_gate_bias
           TFL_OperandHasRank<13, 1>,  // fw_forget_gate_bias
           TFL_OperandHasRank<14, 1>,  // fw_cell_bias
           TFL_OperandHasRank<15, 1>,  // fw_output_gate_bias
           TFL_OperandHasRank<16, 2>,  // fw_projection_weights
           TFL_OperandHasRank<17, 1>,  // fw_projection_bias
           TFL_OperandHasRank<18, 2>,  // bw_input_to_input_weights
           TFL_OperandHasRank<19, 2>,  // bw_input_to_forget_weights
           TFL_OperandHasRank<20, 2>,  // bw_input_to_cell_weights
           TFL_OperandHasRank<21, 2>,  // bw_input_to_output_weights
           TFL_OperandHasRank<22, 2>,  // bw_recurrent_to_input_weights
           TFL_OperandHasRank<23, 2>,  // bw_recurrent_to_forget_weights
           TFL_OperandHasRank<24, 2>,  // bw_recurrent_to_cell_weights
           TFL_OperandHasRank<25, 2>,  // bw_recurrent_to_output_weights
           TFL_OperandHasRank<26, 1>,  // bw_cell_to_input_weights
           TFL_OperandHasRank<27, 1>,  // bw_cell_to_forget_weights
           TFL_OperandHasRank<28, 1>,  // bw_cell_to_output_weights
           TFL_OperandHasRank<29, 1>,  // bw_input_gate_bias
           TFL_OperandHasRank<30, 1>,  // bw_forget_gate_bias
           TFL_OperandHasRank<31, 1>,  // bw_cell_bias
           TFL_OperandHasRank<32, 1>,  // bw_output_gate_bias
           TFL_OperandHasRank<33, 2>,  // bw_projection_weights
           TFL_OperandHasRank<34, 1>,  // bw_projection_bias
           TFL_StatefulOp,
           QuantizableResult,
           DynamicRangeQuantizedOpInterface]> {
  let summary = "Bidirectional sequence lstm operator";

  let description = [{
    Bidirectional lstm is essentially two lstms, one running forward & the
    other running backward. And the output is the concatenation of the two
    lstms.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I8]>:$input,

    // Forward LSTM Weights
    TFL_TensorOfOrNone<[F32, I8]>:$fw_input_to_input_weights,
    TFL_TensorOf<[F32, I8]>:$fw_input_to_forget_weights,
    TFL_TensorOf<[F32, I8]>:$fw_input_to_cell_weights,
    TFL_TensorOf<[F32, I8]>:$fw_input_to_output_weights,

    // Forward Recurrent weights
    TFL_TensorOfOrNone<[F32, I8]>:$fw_recurrent_to_input_weights,
    TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_forget_weights,
    TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_cell_weights,
    TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_output_weights,

    // Forward Cell weights
    TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_input_weights,
    // Optional Forward cell weights
    TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_forget_weights,
    // Optional Forward cell weights
    TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_output_weights,

    // Forward Bias
    TFL_TensorOfOrNone<[F32]>:$fw_input_gate_bias,
    TFL_TensorOf<[F32]>:$fw_forget_gate_bias,
    TFL_TensorOf<[F32]>:$fw_cell_bias,
    TFL_TensorOf<[F32]>:$fw_output_gate_bias,

    // Forward Projection weight and bias
    TFL_TensorOfOrNone<[F32, I8]>:$fw_projection_weights,
    // Forward Optional input
    TFL_TensorOfOrNone<[F32]>:$fw_projection_bias,

    // Backward LSTM Weights
    TFL_TensorOfOrNone<[F32, I8]>:$bw_input_to_input_weights,
    TFL_TensorOf<[F32, I8]>:$bw_input_to_forget_weights,
    TFL_TensorOf<[F32, I8]>:$bw_input_to_cell_weights,
    TFL_TensorOf<[F32, I8]>:$bw_input_to_output_weights,

    // Backward Recurrent weights
    TFL_TensorOfOrNone<[F32, I8]>:$bw_recurrent_to_input_weights,
    TFL_TensorOf<[F32, I8]>:$bw_recurrent_to_forget_weights,
    TFL_TensorOf<[F32, I8]>:$bw_recurrent_to_cell_weights,
    TFL_TensorOf<[F32, I8]>:$bw_recurrent_to_output_weights,

    // Backward Cell weights
    TFL_TensorOfOrNone<[F32, I8]>:$bw_cell_to_input_weights,
    // Optional Backward cell weights
    TFL_TensorOfOrNone<[F32, I8]>:$bw_cell_to_forget_weights,
    // Optional Backward cell weights
    TFL_TensorOfOrNone<[F32, I8]>:$bw_cell_to_output_weights,

    // Backward Bias
    TFL_TensorOfOrNone<[F32]>:$bw_input_gate_bias,
    TFL_TensorOf<[F32]>:$bw_forget_gate_bias,
    TFL_TensorOf<[F32]>:$bw_cell_bias,
    TFL_TensorOf<[F32]>:$bw_output_gate_bias,

    // Backward Projection weight and bias
    TFL_TensorOfOrNone<[F32, I8]>:$bw_projection_weights,
    // Backward Optional input
    TFL_TensorOfOrNone<[F32]>:$bw_projection_bias,

    // Stateful activation and cell states.
    // These are operand indices 35..38, reported by GetStatefulOperands().
    TFL_StatefulTensor:$fw_input_activation_state,
    TFL_StatefulTensor:$fw_input_cell_state,
    TFL_StatefulTensor:$bw_input_activation_state,
    TFL_StatefulTensor:$bw_input_cell_state,

    // Auxiliary input & weights.
    TFL_TensorOfOrNone<[F32, I8]>:$aux_input,
    // Auxiliary fw weights.
    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_input_weights,
    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_forget_weights,
    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_cell_weights,
    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_output_weights,
    // Auxiliary bw weights.
    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_input_weights,
    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_forget_weights,
    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_cell_weights,
    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_output_weights,

    // Attributes
    TFL_AFAttr:$fused_activation_function,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
    ConfinedAttr<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
    BoolAttr:$merge_outputs,
    BoolAttr:$time_major,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs
  );

  // Two results: one output sequence per direction.
  let results = (outs
    AnyTensor:$fw_output,
    AnyTensor:$bw_output
  );

  let hasOptions = 1;

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // StatefulOpInterface:
    std::vector<int> GetStatefulOperands() { return {35, 36, 37, 38}; }

    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() {
      return {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 16, 18, 19, 20, 21,
              22, 23, 24, 25, 26, 27, 28, 33, 40, 41, 42, 43, 44, 45, 46, 47};
    }
  }];
}
4627
// UnidirectionalSequenceRNN op.
def TFL_UnidirectionalSequenceRNNOp : TFL_Op<"unidirectional_sequence_rnn", [
    TFL_OperandHasRank<4, 2>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    PredOpTrait<"input and constant value operands must have same element type",
      TFL_TCopVTEtAreSameAt<1, 2>>,
    TFL_StatefulOp,
    QuantizableResult,
    DynamicRangeQuantizedOpInterface]> {
  let summary = "Unidirectional sequence rnn operator";

  let description = [{
    A recurrent neural network specified by an RNN cell. This Op takes in input
    in a format {batch_size, seq_len, input_size} or
    {seq_len, batch_size, input_size} if it's time-majored.

    It implements the following operation for
    each element in the sequence s = 1...sequence_length:
      outputs[s] = state = activation(RNNOp(inputs[s]))

    where RNNOp is RNNOp TF Lite Op and the “activation” is the function passed
    as the “fused_activation_function” argument (if not “NONE”).
  }];

  let arguments = (
    ins TFL_FpTensor:$input,

    // Weights (operand 1; dynamic-range quantizable, see
    // GetQuantizableOperandIndices below).
    TFL_TensorOf<[F32, QI8]>:$input_to_input_weights,

    // Recurrent weights (operand 2; dynamic-range quantizable).
    TFL_TensorOf<[F32, QI8]>:$recurrent_to_input_weights,

    // Bias
    TFL_FpTensor:$input_gate_bias,

    // Hidden state (operand 4; stateful, rank-2 per trait above).
    TFL_StatefulTensor:$hidden_state,

    // Attributes
    BoolAttr:$time_major,
    TFL_AFAttr:$fused_activation_function,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs
  );

  let results = (outs TFL_FpTensor:$output);

  let hasOptions = 1;

  let customOption = "SequenceRNNOptions";

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // StatefulOpInterface:
    std::vector<int> GetStatefulOperands() { return {4}; }

    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() {
      return {1, 2};
    }
  }];
}
4696
def TFL_WhereOp : TFL_Op<"where", [NoSideEffect]> {
  let summary = "Returns locations of nonzero / true values in a tensor.";

  let description = [{
This operation returns the coordinates of true elements in `condition`. The
coordinates are returned in a 2-D tensor where the first dimension (rows)
represents the number of true elements, and the second dimension (columns)
represents the coordinates of the true elements. Keep in mind, the shape of
the output tensor can vary depending on how many true values there are in
`condition`. Indices are output in row-major order.
  }];

  let arguments = (ins
    // Condition tensor; non-boolean element types are presumably treated as
    // "nonzero" tests — confirm against the kernel implementation.
    TFL_TensorOf<[I1, F32, TFL_Int32Or64, I8, UI8, UI32]>:$condition
  );

  let results = (outs
    // 2-D int64 tensor of coordinates (one row per true/nonzero element).
    TFL_I64Tensor:$index
  );
}
4717
// Debug-only op: compares a quantized activation against its float reference.
def TFL_NumericVerifyOp : Op<TFL_Dialect, "NumericVerify", [
    QuantizableResult,
    SameOperandsShape,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {

  let summary = "Verifies the numericals of the two operands";

  let description = [{
    The NumericVerify op is a debugging op to verify the numericals of the two
    activations. It is a custom op in TFLite.
    If log_if_failed is true, the NumericVerify op calculates statistics on
    differences between float and quantized activations, output
    logs, set differences to the output tensors, and throws an error if errors
    above tolerance exist. If log_if_failed = false, then it doesn't care about
    errors.
  }];

  let arguments = (ins
    // Quantized (or half-precision) activation under test.
    TFL_TensorOf<[QI8, QUI8, QI16, F16, TFL_Quint8]>:$input,
    // Float reference activation; must have the same shape as $input.
    TFL_TensorOf<[F32]>:$ref,

    // Attributes
    DefaultValuedAttr<F32Attr, "0.1">:$tolerance,
    DefaultValuedAttr<BoolAttr, "false">:$log_if_failed
  );

  let results = (outs TFL_FpTensor:$output);
}
4746
// SVDF op.
// NOTE(review): the summary expands SVDF as "single value decomposition";
// the literature usually says "singular value decomposition filter".
def TFL_SVDFOp :
  TFL_Op<"svdf", [
    PredOpTrait<"the input and result tensor elemental types must be same",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_StatefulOp,
    AccumulatorUniformScale<3, 2, 4>,
    QuantizableResult,
    DynamicRangeQuantizedOpInterface]> {

  let summary = "Single value decomposition filter operator";

  let description = [{
    The SVDF op is a decomposition of a densely connected op into low rank
    filters.
    For details: https://research.google.com/pubs/pub43813.html
                 https://arxiv.org/abs/1812.02802
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8]>:$input,

    // Feature Weights (operand 1; dynamic-range quantizable, see
    // GetQuantizableOperandIndices below).
    TFL_TensorOf<[F32, QI8, QUI8]>:$feature_weights,

    // Time weights (operand 2; dynamic-range quantizable).
    TFL_TensorOf<[F32, QI16]>:$time_weights,

    // Bias
    TFL_TensorOfOrNone<[F32, QI32]>:$input_gate_bias,

    // Activation state (operand 4; stateful).
    TFL_StatefulTensor:$activation_state,

    // Attributes
    ConfinedAttr<I32Attr, [IntPositive]>:$rank,
    TFL_AFAttr:$fused_activation_function,
    // Used in post-training dynamic range quantization. If the value is true,
    // input activations are asymmetrically quantized.
    OptionalAttr<BoolAttr>:$asymmetric_quantize_inputs
  );

  let results = (outs TFL_TensorOf<[F32, QI8]>:$output);

  let hasOptions = 1;

  let hasVerifier = 1;

  let extraClassDeclaration = [{
    // StatefulOpInterface:
    std::vector<int> GetStatefulOperands() { return {4}; }

    // DynamicRangeQuantizedOpInterface:
    bool RequireAsymmetricQuantizeInputsAttr() { return true; }
    bool GetDynamicRangeQuantKernelSupport() { return true; }
    std::vector<int> GetQuantizableOperandIndices() {
      return {1, 2};
    }
  }];
}
4807
def TFL_SegmentSumOp: TFL_Op<"segment_sum", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "SegmentSum operator";

  let description = [{
    Computes the sum along segments of a tensor.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32]>:$input,
    // Segment index per slice of $input along its first dimension.
    TFL_I32Tensor:$segment_ids
  );
  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
}
4824
def TFL_UnsortedSegmentProdOp: TFL_Op<"unsorted_segment_prod", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "UnsortedSegmentProd operator";

  let description = [{
    Computes the product along segments of a tensor.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32]>:$input,
    // Segment index per element; unlike segment_sum, ids need not be sorted.
    TFL_I32Tensor:$segment_ids,
    // Scalar count of output segments.
    TFL_I32Tensor:$num_segments
  );
  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
}
4842
def TFL_UnsortedSegmentMaxOp: TFL_Op<"unsorted_segment_max", [
    NoSideEffect, PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {

  let summary = "UnsortedSegmentMax operator";

  let description = [{
    Computes the maximum value along segments of a tensor such that
    output[i] = max(data[j....]) where segment_ids[j...] = i
    if the maximum is empty for a given segment ID i,
    it outputs the smallest possible value for the specific numeric type,
    output[i] = numeric_limits::lowest().
    Note the values of segment_ids are always validated to be less than
    num_segments and an error is thrown for out-of-bound indices.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32]>:$input,
    // Segment index per element; ids need not be sorted.
    TFL_I32Tensor:$segment_ids,
    // Scalar count of output segments.
    TFL_I32Tensor:$num_segments
  );

  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
}
4867
def TFL_UnsortedSegmentMinOp: TFL_Op<"unsorted_segment_min", [
    NoSideEffect, PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {

  let summary = "UnsortedSegmentMin operator";

  let description = [{
    Computes the minimum value along segments of a tensor such that
    output[i] = min(data[j....]) where segment_ids[j...] = i
    if the minimum is empty for a given segment ID i,
    it outputs the largest possible value for the specific numeric type,
    output[i] = numeric_limits::max().
    Note the values of segment_ids are always validated to be less than
    num_segments and an error is thrown for out-of-bound indices.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32]>:$input,
    // Segment index per element; ids need not be sorted.
    TFL_I32Tensor:$segment_ids,
    // Scalar count of output segments.
    TFL_I32Tensor:$num_segments
  );

  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
}
4892
def TFL_UnsortedSegmentSumOp: TFL_Op<"unsorted_segment_sum", [
    NoSideEffect, PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {

  let summary = "UnsortedSegmentSum operator";

  let description = [{
    From a tensor segmentation, computes the `output` resulting from
    summing together elements mapped to the same segment_id. I.e. `output[i]` is
    equal to the tensor sum of all elements from the input tensor mapped to
    segment_id `i`. If no tensors are mapped to a particular included
    segment_id, the output at that indice will be a zero tensor with the
    appropriate shape. Note the values of segment_ids are always validated to be
    less than num_segments and an error is thrown for out-of-bound indices
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32]>:$input,
    // Segment index per element; ids need not be sorted.
    TFL_I32Tensor:$segment_ids,
    // Scalar count of output segments.
    TFL_I32Tensor:$num_segments
  );

  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
}
4917
def TFL_Atan2Op: TFL_Op<"atan2", [
  NoSideEffect,
  SameOperandsAndResultShape,
  SameOperandsAndResultElementType]> {

  let summary = "Atan2 operation";
  let description = [{
    The "atan2" operation computes the arctangent of y/x element-wise,
    respecting signs of the arguments.
  }];

  let arguments = (ins
    // Numerator; same shape and element type as $x (enforced by traits).
    TFL_TensorOf<[F32, F64]>:$y,
    // Denominator.
    TFL_TensorOf<[F32, F64]>:$x
  );

  let results = (outs
    TFL_TensorOf<[F32, F64]>:$output
  );
}
4938
def TFL_YieldOp : Op<TFL_Dialect, "yield",
  [NoSideEffect,
   Terminator,
   QuantizableResult,
   DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "Yield operation";
  let description = [{
    The "yield" operation represents a return operation within the conditional
    and body of structured control flow (e.g., while), and a terminator for ControlNodeOp.
    The operation takes a variable number of operands and produces no results.
    The operand number and types must match the signature of the region that contains the operation.
  }];
  let arguments = (ins Variadic<AnyType>:$operands);

  // Default builder needed for ensureTerminator
  let builders = [
    // Builds a yield with no operands (the implicit empty terminator).
    OpBuilder<(ins),
    [{
      build($_builder, $_state, {});
    }]>
  ];
}
4961
def TFL_IfOp : Op<TFL_Dialect, "if", [
    DeclareOpInterfaceMethods<RegionBranchOpInterface>,
    SingleBlockImplicitTerminator<"YieldOp">,
    RecursiveSideEffects,
    NoRegionArguments,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = [{if-then-else operation}];

  let description = [{
    The `tfl.if` operation represents an if-then-else construct for
    conditionally executing two regions of code. The operand to an if operation
    is a boolean value. For example:

    ```mlir
    tfl.if %b  {
      ...
    } else {
      ...
    }
    ```

    `tfl.if` may also return results that are defined in its regions. The
    values defined are determined by which execution path is taken.

    Example:

    ```mlir
    %x, %y = tfl.if %b -> (tensor<f32>, tensor<f32>) {
      %x_true = ...
      %y_true = ...
      tfl.yield %x_true, %y_true : tensor<f32>, tensor<f32>
    } else {
      %x_false = ...
      %y_false = ...
      tfl.yield %x_false, %y_false : tensor<f32>, tensor<f32>
    }
    ```

    `tfl.if` regions are always terminated with "tfl.yield". If "tfl.if"
    defines no values, the "tfl.yield" can be left out, and will be inserted
    implicitly. Otherwise, it must be explicit.
    Also, if "tfl.if" defines one or more values, the 'else' block cannot be
    omitted.

    Example:

    ```mlir
    tfl.if %b  {
      ...
    }
    ```
  }];

  // Scalar boolean condition selecting between the two regions.
  let arguments = (ins TFL_BoolTensor:$cond);
  let results = (outs Variadic<AnyTensor>:$results);
  // The else region may be empty when the op defines no results.
  let regions = (region SizedRegion<1>:$then_region, AnyRegion:$else_region);

  // Helper builders positioned before the implicit terminator (if any).
  let extraClassDeclaration = [{
    OpBuilder getThenBodyBuilder(OpBuilder::Listener *listener = nullptr) {
      Block* body = getBody(0);
      return results().empty() ? OpBuilder::atBlockTerminator(body, listener)
                               : OpBuilder::atBlockEnd(body, listener);
    }
    OpBuilder getElseBodyBuilder(OpBuilder::Listener *listener = nullptr) {
      Block* body = getBody(1);
      return results().empty() ? OpBuilder::atBlockTerminator(body, listener)
                               : OpBuilder::atBlockEnd(body, listener);
    }
  }];

  // Canonicalizer wasn't defined for this one. In practise, we legalize the
  // tf.IfOp to scf.If op first and then legalize it to tfl.if to reduce
  // code redundancy.
}
5036
def TFL_WhileOp : Op<TFL_Dialect, "while", [
    DeclareOpInterfaceMethods<LoopLikeOpInterface, ["isDefinedOutsideOfLoop"]>,
    SingleBlockImplicitTerminator<"YieldOp">,
    DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = [{While loop}];

  let description = [{
    output = input; while (cond(output)) { output = body(output) }

    While loop where all values are passes through arguments with implicit
    capture.

    input: A list of input tensors whose types are T.
    output: A list of output tensors whose types are T.
    cond: A region that takes 'input' and returns a boolean scalar tensor.
    body: A region that takes a list of tensors and returns another
          list of tensors. Both lists have the same types.
  }];

  let arguments = (ins
    Variadic<AnyTensor>:$input,

    // Used to map StatelessWhile and While op defined in TensorFlow to a common
    // op.
    DefaultValuedAttr<BoolAttr, "false">:$is_stateless
  );
  let results = (outs Variadic<AnyTensor>:$output);

  // Both regions are required and contain exactly one block each.
  let regions = (region SizedRegion<1>:$cond, SizedRegion<1>:$body);

  let hasVerifier = 1;

  let hasCanonicalizer = 1;
}
5071
def TFL_CallOnceOp : TFL_Op<"call_once", []> {
  let summary = "Invokes an initialization function";

  let description = [{
This operation invokes the given initialization function for the session
initializer in tf saved model dialect.
  }];

  let arguments = (ins
    // Symbol name of the initialization function to invoke.
    StrAttr:$session_init_function
  );

  // Produces no results; the op exists for its initialization side effect.
  let results = (outs);
}
5086
def TFL_CustomOp : Op<TFL_Dialect, "custom", [
  DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "Custom op";

  let description = [{
    A generic op for any TFLite custom operation.

    input: A list of inputs in the original op.
    custom_code: A string used to identify which exactly this op is, which
                 corresponds to operator_codes.custom_code in the flatbuffer.
    custom_option: a holder to save the op attributes in bytes fashion.
    output: A list of outputs in the original op.
  }];

  let arguments = (ins
    // Inputs may include None values for absent optional operands.
    Variadic<TFL_TensorOfOrNone<[AnyType]>>:$input,
    StrAttr:$custom_code,
    // Opaque serialized attributes, passed through to the flatbuffer.
    TFL_ConstBytesAttr:$custom_option
  );
  let results = (outs Variadic<AnyTensor>:$output);

  let hasVerifier = 1;
}
5110
def TFL_CustomTfOp : Op<TFL_Dialect, "custom_tf", [
  RecursiveSideEffects,
  IsolatedFromAbove,
  SingleBlockImplicitTerminator<"YieldOp">,
  DeclareOpInterfaceMethods<InferTypeOpInterface>,
  DeclareOpInterfaceMethods<TFL_RuntimeVerification>]> {
  let summary = "Wrapper Op for TF custom ops.";

  let description = [{
    A wrapper op around any Custom TF op. These includes ops defined using
    custom_opdefs or linked which are not defined in TF dialect.
    This Op just wraps the custom op inside a region.
    Note #1, this Op will not include TF Lite custom ops defined using CustomOp.
    Note #2, this op is just internal representation inside the converter and
    are not exposed/exported when the model is exported to Flatbuffer.
  }];

  let arguments = (ins
    // Inputs may include None values for absent optional operands.
    Variadic<TFL_TensorOfOrNone<[AnyType]>>:$input
  );
  let results = (outs Variadic<AnyTensor>:$output);

  // Single-block region holding the wrapped TF custom op.
  let regions = (region SizedRegion<1>:$body);

  let extraClassDeclaration = [{
    // Returns whether the return types are compatible.
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
  }];
}
5140
def TFL_BroadcastToOp : TFL_Op<"broadcast_to", [
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRankAtMost<0, 8>,
    TFL_OperandHasRank<1, 1>,
    PredOpTrait<"output dimension count must be at most 8",
      Or<[TFL_OperandIsUnrankedPred<1>,
          TFL_OperandDimIsAtMost<1, 0, 8>]>>,
    QuantizableResult,
    NoSideEffect]> {
  let summary = "Broadcast an array for a compatible shape.";

  let description = [{
Broadcasting is the process of making arrays to have compatible shapes
for arithmetic operations. Two shapes are compatible if for each
dimension pair they are either equal or one of them is one. When trying
to broadcast a Tensor to a shape, it starts with the trailing dimensions,
and works its way forward.

For example,

>>> x = tf.constant([1, 2, 3])
>>> y = tf.broadcast_to(x, [3, 3])
>>> print(y)
tf.Tensor(
    [[1 2 3]
     [1 2 3]
     [1 2 3]], shape=(3, 3), dtype=int32)

In the above example, the input Tensor with the shape of `[1, 3]`
is broadcasted to output Tensor with shape of `[3, 3]`.

When doing broadcasted operations such as multiplying a tensor
by a scalar, broadcasting (usually) confers some time or space
benefit, as the broadcasted tensor is never materialized.

However, `broadcast_to` does not carry with it any such benefits.
The newly-created tensor takes the full memory of the broadcasted
shape. (In a graph context, `broadcast_to` might be fused to
subsequent operation and then be optimized away, however.)
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I32, I1, I8, QI8, UI8, QUI8, I16, QI16, I64, Complex<F<32>>]>:$input,
    // 1-D target shape; at most 8 output dims (enforced by traits above).
    TFL_I32OrI64Tensor:$shape
  );

  let results = (outs
    TFL_TensorOf<[F32, I32, I1, I8, QI8, UI8, QUI8, I16, QI16, I64, Complex<F<32>>]>:$output
  );

  let hasCanonicalizer = 1;
}
5194
def TFL_RFFT2dOp : TFL_Op<"rfft2d", [NoSideEffect]> {
  let summary = "2D real-valued fast Fourier transform.";

  let description = [{
Computes the 2-dimensional discrete Fourier transform of a real-valued signal
over the inner-most 2 dimensions of `input`.

Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
of `output`: the zero-frequency term, followed by the `fft_length / 2`
positive-frequency terms.

Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
corresponding dimension of `input`, the dimension is cropped. If it is larger,
the dimension is padded with zeros.
  }];

  let arguments = (ins
    TFL_FpTensor:$input,
    // Int32 tensor holding the FFT lengths for the inner-most 2 dimensions.
    TFL_I32Tensor:$fft_length
  );

  let results = (outs
    TFL_Complex64Tensor:$output
  );
}
5221
def TFL_VarHandleOp : TFL_Op<"var_handle", []> {
  let summary = "Returns a handle to a variable resource from its name.";

  let description = [{
    Returns a handle for a variable resource from its name.
    container: the container this variable is placed in.
    shared_name: the name by which this variable is referred to.
  }];

  let arguments = (ins
    // Both attributes default to the empty string when omitted.
    DefaultValuedStrAttr<StrAttr, "">:$container,
    DefaultValuedStrAttr<StrAttr, "">:$shared_name
  );

  let results = (outs TFL_ResourceTensor:$resource_handle);

  let hasOptions = 1;
}
5240
def TFL_AssignVariableOp : TFL_Op<"assign_variable", []> {
  let summary = "Assigns a new value to a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to return
this value or a subsequent newer value of the variable.
  }];

  let arguments = (ins
    // Resource handle identifying the variable (see TFL_VarHandleOp).
    TFL_ResourceTensor:$resource_id,
    TFL_TensorOf<[F32, F64, I1, UI8, I8, QI8, QUI8, I32, I64, QI16, Complex<F<32>>, Complex<F<64>>]>:$value
  );

  // No results; the op exists for its variable-write side effect.
  let results = (outs);
}
5256
def TFL_ReadVariableOp : TFL_Op<"read_variable", []> {
  let summary = "Reads variable value.";

  let description = [{
Read variable data identified by 'resource_id'.
  }];

  let arguments = (ins
    // Resource handle identifying the variable (see TFL_VarHandleOp).
    TFL_ResourceTensor:$resource_id
  );

  // Element types mirror those accepted by TFL_AssignVariableOp.
  let results = (outs TFL_TensorOf<[F32, F64, I1, UI8, I8, QI8, QUI8, I32, I64, QI16, Complex<F<32>>, Complex<F<64>>]>:$result);
}
5270
// 3-D convolution. Both input and filter must be rank-5 tensors; the traits
// below enforce the rank, channel-dimension agreement (input dim 4 vs filter
// dim 3), and that an (optional) bias matches filter dim 4.
def TFL_Conv3DOp : TFL_Op<"conv_3d", [
    NoSideEffect,
    AccumulatorUniformScale<2, 0, 1>,
    TFL_OperandHasRank<0, 5>,
    TFL_OperandHasRank<1, 5>,
    // Channel dimension in input and filter should match.
    TFL_OperandsHaveSameDimsTrait<0, 1, 4, 3>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    PredOpTrait<"bias and output must have same element type",
      Or<[
        TFL_OperandIsNoneType<2>,
        TFL_TCresVTEtIsSameAsOp<0, 2>]>>,
    PredOpTrait<"bias must has num of elements equals to 4th dim of filter",
      Or<[
        TFL_OperandIsNoneType<2>,
        TFL_NumElementsEqualsDim<2, 1, 4>]>>]> {
  let summary = "Convolution 3D operator";

  let description = [{
    Performs convolution operation on 3D inputs.
    Inputs:
      `inputs[0]`: required: the input activation tensor
      `inputs[1]`: required: the filter weight tensor
      `inputs[2]`: optional: the bias tensor
  }];

  let arguments = (ins
    TFL_TensorOf<[F32]>:$input,
    TFL_TensorOf<[F32]>:$filter,
    // Bias is optional: pass a no_value (NoneType) when absent.
    TFL_TensorOfOrNone<[F32]>:$bias,
    // Per-axis dilation and stride factors for the depth/height/width dims.
    I32Attr:$dilation_d_factor,
    I32Attr:$dilation_h_factor,
    I32Attr:$dilation_w_factor,
    TFL_AFAttr:$fused_activation_function,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_d,
    I32Attr:$stride_h,
    I32Attr:$stride_w
  );

  let results = (outs TFL_TensorOf<[F32]>:$output);

  let hasOptions = 1;

  // Serialized via the custom Conv3DOptions flatbuffer table.
  let customOption = "Conv3DOptions";
}
5318
// Transposed (gradient/deconvolution-style) 3-D convolution. Note the operand
// order differs from conv_3d: the rank-1, 5-element output_shape comes first,
// then filter, input, and optional bias.
def TFL_Conv3DTransposeOp : TFL_Op<"conv_3d_transpose", [
    NoSideEffect,
    AccumulatorUniformScale<2, 0, 1>,
    // output_shape is a rank-1 tensor with exactly 5 elements.
    TFL_OperandHasRank<0, 1>,
    TFL_OperandHasRank<1, 5>,
    TFL_OperandHasRank<2, 5>,
    TFL_NumElementsTrait<0, 5>,
    // Channel dimension in input and filter should match.
    TFL_OperandsHaveSameDimsTrait<2, 1, 4, 4>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 2>>,
    PredOpTrait<"bias and output must have same element type",
      Or<[
        TFL_OperandIsNoneType<3>,
        TFL_TCresVTEtIsSameAsOp<0, 3>]>>,
    PredOpTrait<"bias must has num of elements equals to 4th dim of filter",
      Or<[
        TFL_OperandIsNoneType<3>,
        TFL_NumElementsEqualsDim<3, 1, 4>]>>]> {
  let summary = "Transposed Convolution 3D operator";

  let description = [{
    Performs transposed convolution operation on 3D inputs.
    Inputs:
      `inputs[0]`: required: the shape of output tensor
      `inputs[1]`: required: the filter weight tensor
      `inputs[2]`: required: the input activation tensor
      `inputs[3]`: optional: the bias tensor
  }];

  let arguments = (ins
    TFL_I32Tensor:$output_shape,
    TFL_TensorOf<[F32]>:$filter,
    TFL_TensorOf<[F32]>:$input,
    // Bias is optional: pass a no_value (NoneType) when absent.
    TFL_TensorOfOrNone<[F32]>:$bias,
    I32Attr:$dilation_d_factor,
    I32Attr:$dilation_h_factor,
    I32Attr:$dilation_w_factor,
    TFL_AFAttr:$fused_activation_function,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_d,
    I32Attr:$stride_h,
    I32Attr:$stride_w
  );

  let results = (outs TFL_TensorOf<[F32]>:$output);

  let hasOptions = 1;

  // Reuses the same custom option table as conv_3d.
  let customOption = "Conv3DOptions";
}
5370
// Element-wise magnitude of a complex tensor. Shape is preserved; only the
// element type changes (complex<f32> -> f32, complex<f64> -> f64 -- the
// pairing is not enforced by a trait here, TODO confirm in the verifier).
def TFL_ComplexAbsOp : TFL_Op<"complex_abs", [
  NoSideEffect,
  SameOperandsAndResultShape]> {
  let summary = "Computes the complex absolute value of a tensor.";

  let description = [{
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float` or `double` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
value is computed as \\( \sqrt{a^2 + b^2}\\).
  }];

  let arguments = (ins
    TFL_TensorOf<[Complex<F<32>>, Complex<F<64>>]>:$input
  );

  let results = (outs
    TFL_TensorOf<[F32, F64]>:$output
  );
}
5391
// Element-wise extraction of the real component of a complex tensor.
def TFL_RealOp : TFL_Op<"real", [
  NoSideEffect,
  SameOperandsAndResultShape]> {
  let summary = "Returns the real part of a complex number.";

  let description = [{
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the real part of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
 part returned by this operation and *b* is the imaginary part.
  }];

  let arguments = (ins
    TFL_TensorOf<[Complex<F<32>>, Complex<F<64>>]>:$input
  );

  let results = (outs
    TFL_TensorOf<[F32, F64]>:$output
  );
}
5412
// Element-wise extraction of the imaginary component of a complex tensor.
def TFL_ImagOp : TFL_Op<"imag", [
  NoSideEffect,
  SameOperandsAndResultShape]> {
  let summary = "Returns the imaginary part of a complex number.";

  let description = [{
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the imaginary part of each element in `input`. All
elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part returned by this operation.
  }];

  let arguments = (ins
    TFL_TensorOf<[Complex<F<32>>, Complex<F<64>>]>:$input
  );

  let results = (outs
    TFL_TensorOf<[F32, F64]>:$output
  );
}
5433
// Creates an (initially empty) hash table resource. Populated later by
// hashtable_import and queried by hashtable_find / hashtable_size.
def TFL_HashtableOp: TFL_Op<"hashtable", []> {
  let summary = "Creates a non-initialized hash table.";
  let description = [{
This op creates a hash table, specifying the type of its keys and values.
Before using the table you will have to initialize it.  After initialization the
table will be immutable.
  }];

  let arguments = (ins
    // Identifier for the table; presumably unique per model -- TODO confirm.
    I32Attr:$table_id,
    TypeAttr:$key_dtype,
    TypeAttr:$value_dtype
  );

  let results = (outs TFL_ResourceTensor:$out);

  let hasOptions = 1;
}
5452
// Looks up each element of `keys` in the table behind `hash_table`, producing
// the matching value, or `default_value` for keys that are absent.
def TFL_HashtableFindOp: TFL_Op<"hashtable_find", []> {
  let summary = "Looks up keys in a table, outputs the corresponding values.";

  // Wording matches hashtable_import's description ("must be of the same
  // type"); the original text was missing the word "be".
  let description = [{
The tensor `keys` must be of the same type as the keys of the table.
The output `values` is of the type of the table values.

The scalar `default_value` is the value output for keys not present in the
table. It must also be of the same type as the table values.
  }];

  let arguments = (ins
    TFL_ResourceTensor:$hash_table,
    TFL_TensorOf<[I32, TFL_Str, I64]>:$keys,
    TFL_TensorOf<[F32, I32, TFL_Str, I64]>:$default_value
  );

  let results = (outs TFL_TensorOf<[F32, I32, TFL_Str, I64]>:$out);
}
5472
// Bulk-initializes a hash table resource from parallel key/value tensors,
// replacing any previous contents.
def TFL_HashtableImportOp: TFL_Op<"hashtable_import", []> {
  let summary = [{
Replaces the contents of the table with the specified keys and values.
  }];

  let description = [{
The tensor `keys` must be of the same type as the keys of the table.
The tensor `values` must be of the type of the table values.
  }];

  let arguments = (ins
    TFL_ResourceTensor:$hash_table,
    TFL_TensorOf<[I32, TFL_Str, I64]>:$keys,
    TFL_TensorOf<[F32, I32, TFL_Str, I64]>:$values
  );

  // No SSA results: the op acts only through its effect on the table resource.
  let results = (outs);
}
5491
5492
// Queries the current element count of a hash table resource.
def TFL_HashtableSizeOp: TFL_Op<"hashtable_size", []> {
  let summary = "Computes the number of elements in the given table.";

  // Added for consistency with the other hashtable ops, all of which carry a
  // description for the generated op documentation.
  let description = [{
Outputs the number of elements currently stored in the table referenced by
`hash_table`.
  }];

  let arguments = (ins
    TFL_ResourceTensor:$hash_table
  );

  let results = (outs
    TFL_I64Tensor:$out
  );
}
5504
// Computes the broadcasted result shape of two shape vectors. Both operands
// and the result are rank-1 integer tensors with matching element types.
def TFL_BroadcastArgsOp : TFL_Op<"broadcast_args",[
    OperandsSameElementTypeConstraintBase<"BroadcastArgs op">,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    TFL_OperandHasRank<0, 1>,
    TFL_OperandHasRank<1, 1>,
    NoSideEffect]> {
  let summary = "Return the shape of s0 op s1 with broadcast.";

  let description = [{
Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
  }];

  let arguments = (ins
    TFL_I32OrI64Tensor:$s0,
    TFL_I32OrI64Tensor:$s1
  );

  let results = (outs
    TFL_I32OrI64Tensor:$r0
  );
}
5528
// Maps each input element to the index of the bucket (defined by the static
// `boundaries` attribute) it falls into; output has the same shape as input.
def TFL_BucketizeOp
    : TFL_Op<"bucketize", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Bucketizes 'input' based on 'boundaries'.";

  let description = [{
Example:

If the inputs are `boundaries = [0, 10, 100]` and
`input = [[-5, 10000][150, 10][5, 100]]`,
then the output will be `output = [[0, 3][3, 2][1, 3]]`.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, F64, I32, I64]>:$input,
    // Bucket boundaries are a compile-time float array attribute, not an
    // operand.
    F32ArrayAttr:$boundaries
  );

  let results = (outs
    TFL_TensorOf<[I32]>:$output
  );

  let hasOptions = 1;
}
5552
// Generates a float tensor of the given shape with values drawn uniformly
// from [0, 1). Stateful (no NoSideEffect trait), so it is not CSE'd/folded.
def TFL_RandomUniformOp : TFL_Op<"random_uniform", []> {
  let summary = "Outputs random values from a uniform distribution.";

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.
  }];

  let arguments = (ins
    TFL_I32Tensor:$shape,
    // Generator seeds; both default to 0 -- presumably meaning "pick a seed"
    // as in TF, TODO confirm runtime semantics.
    DefaultValuedAttr<I64Attr, "0">:$seed,
    DefaultValuedAttr<I64Attr, "0">:$seed2
  );

  // Indentation normalized to the file's 2-space convention (the original had
  // a stray 3-space indent on `let results`), matching random_standard_normal.
  let results = (outs
    TFL_TensorOf<[F32]>:$out);
  let hasOptions = 1;
  let customOption = "RandomOptions";
}
5572
// Generates a float tensor of the given shape with values drawn from a
// standard normal distribution. Stateful (no NoSideEffect trait).
def TFL_RandomStandardNormalOp : TFL_Op<"random_standard_normal", []> {
  let summary = "Outputs random values from a normal distribution.";

  let description = [{
The generated values will have mean 0 and standard deviation 1.
  }];

  let arguments = (ins
    TFL_I32Tensor:$shape,
    // Generator seeds; both default to 0.
    DefaultValuedAttr<I64Attr, "0">:$seed,
    DefaultValuedAttr<I64Attr, "0">:$seed2
  );

  let results = (outs
    TFL_TensorOf<[F32]>:$out);
  let hasOptions = 1;
  // Shares the RandomOptions custom option table with random_uniform.
  let customOption = "RandomOptions";
}
5591
// Samples class indices from the categorical distribution described by
// `logits`. Stateful (no NoSideEffect trait).
def TFL_MultinomialOp : TFL_Op<"multinomial", []> {
  let summary = "Draws samples from a categorical distribution.";

  let description = [{
The generated values will have a categorical distribution based on the `logits`
or unnormalized log-probabilities provided for all classes.
  }];

  let arguments = (ins
    TFL_FpTensor:$logits,
    TFL_I32Tensor:$num_samples,
    // Generator seeds; both default to 0.
    DefaultValuedAttr<I64Attr, "0">:$seed,
    DefaultValuedAttr<I64Attr, "0">:$seed2
  );

  let results = (outs
    TFL_TensorOf<[I32, I64]>:$out);

  // Shares the RandomOptions custom option table with the random ops above.
  let hasOptions = 1;
  let customOption = "RandomOptions";
}
5613
// A ConstantLike op producing the NoneType value used for absent optional
// operands (e.g. a missing conv bias). Defined directly on TFL_Dialect rather
// than via TFL_Op, so it skips the TFL op-interface defaults.
def TFL_NoValueOp : Op<TFL_Dialect, "no_value", [ConstantLike, NoSideEffect]> {
  let summary = "constant representing no value.";

  let description = [{
No value constant op.
  }];

  let arguments = (ins UnitAttr:$value);

  let results = (outs NoneType:$none_val);

  // Folds like a constant (required for ConstantLike materialization).
  let hasFolder = 1;

  let extraClassDeclaration = [{
    /// Returns true if a constant operation can be built with the given value
    /// and result type.
    static bool isBuildableWith(Attribute value, Type type);
  }];
}
5633
// Region-wrapper op used late in the pipeline to pin operation order via
// !tfl.control edges. Must sit directly inside a func.func; its single-block
// region is implicitly terminated by a YieldOp.
def TFL_ControlNodeOp : Op<TFL_Dialect, "control_node",
    [HasParent<"mlir::func::FuncOp">, RecursiveSideEffects, SingleBlockImplicitTerminator<"YieldOp">]> {
  let summary = [{
    The `TFL.control_node` operation wraps single-block operations in order to attach control edges.
  }];

  let description = [{
    This is used to wrap regions and attach control dependencies to them. Typically,
    this will happen in one of the last steps before emitting the flatbuffer model
    in order to enable optimizations that rely on a fixed order of operations (such
    as rematerialization.)
    The flatbuffer exporter will unwrap the wrapped region and annotate the generated
    model with metadata such that any runtime reorderings will respect the order
    given by the control dependencies.
  }];

  // May take values of type !tfl.control as inputs
  let arguments = (ins
    Variadic<TFL_Control>:$controlInputs
  );

  // Adds a single value of type !tfl.control as the last output (all other
  // outputs are copied from the wrapped region.)
  let results = (outs
    Variadic<AnyTensor>:$outputs,
    TFL_Control:$control
  );

  let regions = (region SizedRegion<1>:$body);

  // C++ helpers implemented in tfl_ops.cc.
  let extraClassDeclaration = [{
    Block &GetBody() { return getOperation()->getRegion(0).front(); }
    YieldOp GetYield();
    bool WrapsSinglePerfectlyForwardedOp();
  }];

  // Custom verifier and printer/parser only; no canonicalization or folding.
  let hasCanonicalizer = 0;
  let hasVerifier = 1;
  let hasFolder = 0;
  let hasCustomAssemblyFormat = 1;
}
5675
5676#endif // TFL_OPS
5677
5678// LINT.ThenChange()
5679// LINT.ThenChange(//tensorflow/lite/tools/versioning/op_version.cc)
5680