// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Revision History
// Version 0: Initial version.
// Version 1: Add subgraphs to schema.
// Version 2: Rename operators to conform to NN API.
// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
// Version 3a: Add new builtin op code field. Has backward compatibility with
//             version 3.
// Version 3b: Rename fields in SignatureDef. Has backward compatibility with
//             version 3 and 3a.
// Version 3c: Move constant tensor buffers & custom op buffers outside of
//             Flatbuffers. Has backward compatibility with version 3, 3a and
//             3b.

namespace tflite;

// This corresponds to the version.
file_identifier "TFL3";
// File extension of any written files.
file_extension "tflite";

// IMPORTANT: All new members of tables, enums and unions must be added at the
// end to ensure backwards compatibility.
// The type of data stored in a tensor.
enum TensorType : byte {
  FLOAT32 = 0,
  FLOAT16 = 1,
  INT32 = 2,
  UINT8 = 3,
  INT64 = 4,
  STRING = 5,
  BOOL = 6,
  INT16 = 7,
  COMPLEX64 = 8,
  INT8 = 9,
  FLOAT64 = 10,
  COMPLEX128 = 11,
  UINT64 = 12,
  // Experimental: Resource and variant types are experimental and subject to
  // change. Do not implement custom kernels using resource & variant types
  // yet.
  RESOURCE = 13,
  VARIANT = 14,
  UINT32 = 15,
  UINT16 = 16,
  INT4 = 17,
}

// Custom quantization parameters for experimenting with new quantization
// techniques.
table CustomQuantization {
  custom:[ubyte] (force_align: 16);
}

// Represents a specific quantization technique's parameters.
union QuantizationDetails {
  CustomQuantization,
}

// Parameters for converting a quantized tensor back to float.
table QuantizationParameters {
  // These four parameters are the asymmetric linear quantization parameters.
  // Given a quantized value q, the corresponding float value f should be:
  //   f = scale * (q - zero_point)
  // For other quantization types, the QuantizationDetails below is used.
  // (A worked example follows this table.)
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];  // For dequantizing the tensor's values.
  zero_point:[long];

  // If this is not none, the other quantization parameters (i.e. the min, max,
  // scale and zero_point fields above) are ignored and the value of the
  // QuantizationDetails union is used instead.
  details:QuantizationDetails;

  // Specifies the dimension of the Tensor's shape that the scales and
  // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
  // with quantization params:
  //   scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
  // will be quantized across the second dimension of t.
  //   t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
  //   t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
  //   t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
  quantized_dimension:int;
}
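
// A worked example of the dequantization formula above (illustrative comment
// only, not part of the schema): with scale = [0.5] and zero_point = [128], a
// quantized uint8 value q = 200 maps to f = 0.5 * (200 - 128) = 36.0. A
// per-tensor dequantize could be sketched in Python (names are hypothetical):
//
//   def dequantize(q, scale, zero_point):
//       return [scale * (v - zero_point) for v in q]
//
//   dequantize([200, 128], 0.5, 128)  # -> [36.0, 0.0]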

// Sparse tensors.
// We use a modification of the TACO format.
// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
//
// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
// potentially with a k-dimensional block (0 <= k <= n) with dims
// (dn, ..., dn+k-1), the format needs to specify:
//   1. In what order to traverse these dimensions. For example, to store a 2-D
//      matrix in row major order, the traversal order would be (d0, d1),
//      whereas to store it in column major order, the traversal order would be
//      (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
//      could be (d0, d1, d2, d3).
//   2. How each block dimension in (dn, ..., dn+k-1) maps to the original
//      tensor dimension in (d0, ..., dn-1).
//   3. In the traversal order defined above, the format (dense vs. sparse) and
//      index metadata for each dimension. For a dense dimension, this is just
//      the size of that dimension. For a sparse dimension, it's the same as
//      the compressed index defined in the Compressed Sparse Row (CSR) format.
//      (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)

// The storage type for a dimension. Currently we support:
//   1. DENSE: each coordinate in this dimension is stored implicitly.
//   2. SPARSE_CSR: only the coordinates with non-zero elements are stored.
//      The compression technique is the same as the one used by CSR.
// More types like a sparse dimension with a different compression technique
// could be added to the list in the future.
enum DimensionType : byte {
  DENSE = 0,
  SPARSE_CSR = 1,
}

table Int32Vector {
  values:[int];
}

table Uint16Vector {
  values:[ushort] (force_align: 4);
}

table Uint8Vector {
  values:[ubyte] (force_align: 4);
}

// Variable-typed buffer to store the index metadata for a sparse dimension.
// The widest type is Int32 instead of UInt32 because a tensor's shape is an
// int32 vector. We don't want the per-dimension index to overflow that range.
union SparseIndexVector {
  Int32Vector,
  Uint16Vector,
  Uint8Vector
}

table DimensionMetadata {
  // Whether a dimension is dense or sparse.
  format:DimensionType;
  // Index metadata used for a dimension.
  //   - If format is DimensionType.DENSE then we use the dense_size field to
  //     store the size of that dimension. Each index in that dimension is
  //     stored implicitly.
  //   - If format is DimensionType.SPARSE_CSR then we use array_segments and
  //     array_indices to encode that dimension. array_segments represents how
  //     to segment the indices array; each segment corresponds to one element
  //     in the previous dimension. array_indices represents the index of the
  //     non-zero elements within this dimension (as those in the CSR matrix
  //     format, where the first array is row pointers and the second array is
  //     column indices).
  dense_size:int;
  array_segments:SparseIndexVector;
  array_indices:SparseIndexVector;
}

// Parameters to encode a sparse TfLite tensor.
table SparsityParameters {
  // The traversal order of the dimensions defined in the `shape` field of the
  // conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1,
  // ..., dn-1),
  //   - if not block sparse, the traversal_order is just a permutation of (d0,
  //     ..., dn-1). For example, a 2-D matrix stored in row-major order would
  //     have traversal_order = (d0, d1).
  //   - if block sparse with a k-dimensional block (0 <= k <= n), the
  //     traversal_order has n + k elements. The first n elements are still a
  //     permutation of (d0, ..., dn-1). The last k elements are a permutation
  //     of (dn, ..., dn+k-1), defining how to traverse a block internally. For
  //     example, a 2-D matrix with 2-D blocks, both stored in row-major order
  //     would have traversal_order = (d0, d1, d2, d3).
  traversal_order:[int];
  // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
  // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
  // tensor dimension in (d0, ..., dn-1).
  // It's stored in the order of (dn, ..., dn+k-1).
  // If not block-sparse, this field is NULL.
  block_map:[int];
  // In the traversal order defined above, the metadata needed for
  // each dimension to locate the non-zero values in the original dense tensor.
  // The size of the dim_metadata array = the size of the traversal_order array
  // = n + k. (A worked example follows this table.)
  dim_metadata:[DimensionMetadata];
}
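
// A worked example of this encoding (illustrative comment only): the 4x4
// matrix
//   [[1, 0, 0, 0],
//    [0, 2, 3, 0],
//    [0, 0, 0, 0],
//    [0, 0, 4, 0]]
// stored with traversal_order = [0, 1], no block (block_map omitted), d0
// dense and d1 sparse would use:
//   dim_metadata[0]: format=DENSE, dense_size=4
//   dim_metadata[1]: format=SPARSE_CSR,
//                    array_segments = [0, 1, 3, 3, 4]  (row pointers)
//                    array_indices  = [0, 1, 2, 2]     (column indices)
// with the non-zero values stored as [1, 2, 3, 4]. See also
// tensorflow/lite/testdata/sparse_tensor.json.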

// The nested tensor type for VARIANT type.
table VariantSubType {
  // The tensor shape.
  shape:[int];
  type:TensorType;
  // If false, the rank (i.e. the number of tensor dimensions) is unknown,
  // and "shape" must be [].
  has_rank: bool = false;
}

table Tensor {
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, height, width, number of channels] (that's
  // TensorFlow's NHWC).
  shape:[int];
  type:TensorType;
  // An index that refers to the buffers table at the root of the model. Or,
  // if there is no data buffer associated (i.e. intermediate results), then
  // this is 0 (which refers to an always-present empty buffer).
  //
  // The data_buffer itself is an opaque container, with the assumption that the
  // target device is little-endian. In addition, all builtin operators assume
  // the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
  // (A sketch of this index computation follows this table.)
  buffer:uint;
  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.

  is_variable:bool = false;

  // Parameters to encode a sparse tensor. See the example in
  // tensorflow/lite/testdata/sparse_tensor.json.
  sparsity:SparsityParameters;  // Optional.

  // Encodes `shape` with unknown dimensions. Unknown dimensions are
  // represented with -1.
  shape_signature:[int]; // Optional.

  // This field is added to distinguish between scalars and tensors of unknown
  // rank (both of which have shape []).
  // For scalars (rank = 0), shape = [] and has_rank = true.
  // For tensors with known rank (rank > 0) and shape, shape = [...] and
  // has_rank = true.
  // For tensors with unknown rank and shape, shape = [] and has_rank = false.
  has_rank: bool = false;

  // The nested Tensor types for VARIANT type. This is always empty for
  // non-VARIANT types. This is optional because the nested type can be omitted.
  // Currently only 1 subtype is supported. The field is defined as an array for
  // flexibility of supporting multiple subtypes in the future.
  variant_tensors:[VariantSubType];
}
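
// Illustrative comment (a sketch, not part of the schema): the row-major rule
// above can be written out in Python; `flat_index` is a hypothetical helper.
//
//   def flat_index(idx, shape):
//       offset = 0
//       for i, d in zip(idx, shape):
//           offset = offset * d + i
//       return offset
//
//   flat_index([1, 2, 1], [4, 3, 2])  # -> 1*3*2 + 2*2 + 1 = 11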

// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
// LINT.IfChange
enum BuiltinOperator : int32 {
  ADD = 0,
  AVERAGE_POOL_2D = 1,
  CONCATENATION = 2,
  CONV_2D = 3,
  DEPTHWISE_CONV_2D = 4,
  DEPTH_TO_SPACE = 5,
  DEQUANTIZE = 6,
  EMBEDDING_LOOKUP = 7,
  FLOOR = 8,
  FULLY_CONNECTED = 9,
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  L2_POOL_2D = 12,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  LOGISTIC = 14,
  LSH_PROJECTION = 15,
  LSTM = 16,
  MAX_POOL_2D = 17,
  MUL = 18,
  RELU = 19,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
  RELU_N1_TO_1 = 20,
  RELU6 = 21,
  RESHAPE = 22,
  RESIZE_BILINEAR = 23,
  RNN = 24,
  SOFTMAX = 25,
  SPACE_TO_DEPTH = 26,
  SVDF = 27,
  TANH = 28,
  CONCAT_EMBEDDINGS = 29,
  SKIP_GRAM = 30,
  CALL = 31,
  CUSTOM = 32,
  EMBEDDING_LOOKUP_SPARSE = 33,
  PAD = 34,
  UNIDIRECTIONAL_SEQUENCE_RNN = 35,
  GATHER = 36,
  BATCH_TO_SPACE_ND = 37,
  SPACE_TO_BATCH_ND = 38,
  TRANSPOSE = 39,
  MEAN = 40,
  SUB = 41,
  DIV = 42,
  SQUEEZE = 43,
  UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
  STRIDED_SLICE = 45,
  BIDIRECTIONAL_SEQUENCE_RNN = 46,
  EXP = 47,
  TOPK_V2 = 48,
  SPLIT = 49,
  LOG_SOFTMAX = 50,
  // DELEGATE is a special op type for the operations which are delegated to
  // other backends.
  // WARNING: Experimental interface, subject to change
  DELEGATE = 51,
  BIDIRECTIONAL_SEQUENCE_LSTM = 52,
  CAST = 53,
  PRELU = 54,
  MAXIMUM = 55,
  ARG_MAX = 56,
  MINIMUM = 57,
  LESS = 58,
  NEG = 59,
  PADV2 = 60,
  GREATER = 61,
  GREATER_EQUAL = 62,
  LESS_EQUAL = 63,
  SELECT = 64,
  SLICE = 65,
  SIN = 66,
  TRANSPOSE_CONV = 67,
  SPARSE_TO_DENSE = 68,
  TILE = 69,
  EXPAND_DIMS = 70,
  EQUAL = 71,
  NOT_EQUAL = 72,
  LOG = 73,
  SUM = 74,
  SQRT = 75,
  RSQRT = 76,
  SHAPE = 77,
  POW = 78,
  ARG_MIN = 79,
  FAKE_QUANT = 80,
  REDUCE_PROD = 81,
  REDUCE_MAX = 82,
  PACK = 83,
  LOGICAL_OR = 84,
  ONE_HOT = 85,
  LOGICAL_AND = 86,
  LOGICAL_NOT = 87,
  UNPACK = 88,
  REDUCE_MIN = 89,
  FLOOR_DIV = 90,
  REDUCE_ANY = 91,
  SQUARE = 92,
  ZEROS_LIKE = 93,
  FILL = 94,
  FLOOR_MOD = 95,
  RANGE = 96,
  RESIZE_NEAREST_NEIGHBOR = 97,
  LEAKY_RELU = 98,
  SQUARED_DIFFERENCE = 99,
  MIRROR_PAD = 100,
  ABS = 101,
  SPLIT_V = 102,
  UNIQUE = 103,
  CEIL = 104,
  REVERSE_V2 = 105,
  ADD_N = 106,
  GATHER_ND = 107,
  COS = 108,
  WHERE = 109,
  RANK = 110,
  ELU = 111,
  REVERSE_SEQUENCE = 112,
  MATRIX_DIAG = 113,
  QUANTIZE = 114,
  MATRIX_SET_DIAG = 115,
  ROUND = 116,
  HARD_SWISH = 117,
  IF = 118,
  WHILE = 119,
  NON_MAX_SUPPRESSION_V4 = 120,
  NON_MAX_SUPPRESSION_V5 = 121,
  SCATTER_ND = 122,
  SELECT_V2 = 123,
  DENSIFY = 124,
  SEGMENT_SUM = 125,
  BATCH_MATMUL = 126,
  PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
  CUMSUM = 128,
  CALL_ONCE = 129,
  BROADCAST_TO = 130,
  RFFT2D = 131,
  CONV_3D = 132,
  IMAG = 133,
  REAL = 134,
  COMPLEX_ABS = 135,
  HASHTABLE = 136,
  HASHTABLE_FIND = 137,
  HASHTABLE_IMPORT = 138,
  HASHTABLE_SIZE = 139,
  REDUCE_ALL = 140,
  CONV_3D_TRANSPOSE = 141,
  VAR_HANDLE = 142,
  READ_VARIABLE = 143,
  ASSIGN_VARIABLE = 144,
  BROADCAST_ARGS = 145,
  RANDOM_STANDARD_NORMAL = 146,
  BUCKETIZE = 147,
  RANDOM_UNIFORM = 148,
  MULTINOMIAL = 149,
  GELU = 150,
  DYNAMIC_UPDATE_SLICE = 151,
  RELU_0_TO_1 = 152,
  UNSORTED_SEGMENT_PROD = 153,
  UNSORTED_SEGMENT_MAX = 154,
  UNSORTED_SEGMENT_SUM = 155,
  ATAN2 = 156,
  UNSORTED_SEGMENT_MIN = 157,
  SIGN = 158,
  BITCAST = 159,
  BITWISE_XOR = 160,
  RIGHT_SHIFT = 161,
  // All operators with the STABLEHLO_ prefix are subject to change.
  // Many of the ops below cannot be executed by the TFLite runtime.
  STABLEHLO_LOGISTIC = 162, // WARNING: No runtime support yet
  STABLEHLO_ADD = 163, // WARNING: No runtime support yet
  STABLEHLO_DIVIDE = 164, // WARNING: No runtime support yet
  STABLEHLO_MULTIPLY = 165, // WARNING: No runtime support yet
  STABLEHLO_MAXIMUM = 166, // WARNING: No runtime support yet
  STABLEHLO_RESHAPE = 167, // WARNING: No runtime support yet
  STABLEHLO_CLAMP = 168, // WARNING: No runtime support
  STABLEHLO_CONCATENATE = 169, // WARNING: No runtime support
  STABLEHLO_BROADCAST_IN_DIM = 170, // WARNING: No runtime support
  STABLEHLO_CONVOLUTION = 171, // WARNING: No runtime support
  STABLEHLO_SLICE = 172, // WARNING: No runtime support
  STABLEHLO_CUSTOM_CALL = 173, // WARNING: No runtime support
  STABLEHLO_REDUCE = 174, // WARNING: No runtime support
  STABLEHLO_ABS = 175, // WARNING: No runtime support
  STABLEHLO_AND = 176, // WARNING: No runtime support
  STABLEHLO_COSINE = 177, // WARNING: No runtime support
  STABLEHLO_EXPONENTIAL = 178, // WARNING: No runtime support
  STABLEHLO_FLOOR = 179, // WARNING: No runtime support
  STABLEHLO_LOG = 180, // WARNING: No runtime support
  STABLEHLO_MINIMUM = 181, // WARNING: No runtime support
  STABLEHLO_NEGATE = 182, // WARNING: No runtime support
  STABLEHLO_OR = 183, // WARNING: No runtime support
  STABLEHLO_POWER = 184, // WARNING: No runtime support
  STABLEHLO_REMAINDER = 185, // WARNING: No runtime support
  STABLEHLO_RSQRT = 186, // WARNING: No runtime support
  STABLEHLO_SELECT = 187, // WARNING: No runtime support
  STABLEHLO_SUBTRACT = 188, // WARNING: No runtime support
  STABLEHLO_TANH = 189, // WARNING: No runtime support
  STABLEHLO_SCATTER = 190,
  STABLEHLO_COMPARE = 191, // WARNING: No runtime support
  STABLEHLO_CONVERT = 192, // WARNING: No runtime support
  STABLEHLO_DYNAMIC_SLICE = 193, // WARNING: No runtime support
  STABLEHLO_DYNAMIC_UPDATE_SLICE = 194, // WARNING: No runtime support
  STABLEHLO_PAD = 195, // WARNING: No runtime support
  STABLEHLO_IOTA = 196, // WARNING: No runtime support
  STABLEHLO_DOT_GENERAL = 197, // WARNING: No runtime support
  STABLEHLO_REDUCE_WINDOW = 198, // WARNING: No runtime support
  STABLEHLO_SORT = 199, // WARNING: No runtime support
  STABLEHLO_WHILE = 200, // WARNING: No runtime support
  STABLEHLO_GATHER = 201, // WARNING: No runtime support
  STABLEHLO_TRANSPOSE = 202, // WARNING: No runtime support
  DILATE = 203,
  STABLEHLO_RNG_BIT_GENERATOR = 204,
  REDUCE_WINDOW = 205,
}
// LINT.ThenChange(nnapi_linter/linter.proto)

// Options for the builtin operators.
union BuiltinOptions {
  Conv2DOptions,
  DepthwiseConv2DOptions,
  ConcatEmbeddingsOptions,
  LSHProjectionOptions,
  Pool2DOptions,
  SVDFOptions,
  RNNOptions,
  FullyConnectedOptions,
  SoftmaxOptions,
  ConcatenationOptions,
  AddOptions,
  L2NormOptions,
  LocalResponseNormalizationOptions,
  LSTMOptions,
  ResizeBilinearOptions,
  CallOptions,
  ReshapeOptions,
  SkipGramOptions,
  SpaceToDepthOptions,
  EmbeddingLookupSparseOptions,
  MulOptions,
  PadOptions,
  GatherOptions,
  BatchToSpaceNDOptions,
  SpaceToBatchNDOptions,
  TransposeOptions,
  ReducerOptions,
  SubOptions,
  DivOptions,
  SqueezeOptions,
  SequenceRNNOptions,
  StridedSliceOptions,
  ExpOptions,
  TopKV2Options,
  SplitOptions,
  LogSoftmaxOptions,
  CastOptions,
  DequantizeOptions,
  MaximumMinimumOptions,
  ArgMaxOptions,
  LessOptions,
  NegOptions,
  PadV2Options,
  GreaterOptions,
  GreaterEqualOptions,
  LessEqualOptions,
  SelectOptions,
  SliceOptions,
  TransposeConvOptions,
  SparseToDenseOptions,
  TileOptions,
  ExpandDimsOptions,
  EqualOptions,
  NotEqualOptions,
  ShapeOptions,
  PowOptions,
  ArgMinOptions,
  FakeQuantOptions,
  PackOptions,
  LogicalOrOptions,
  OneHotOptions,
  LogicalAndOptions,
  LogicalNotOptions,
  UnpackOptions,
  FloorDivOptions,
  SquareOptions,
  ZerosLikeOptions,
  FillOptions,
  BidirectionalSequenceLSTMOptions,
  BidirectionalSequenceRNNOptions,
  UnidirectionalSequenceLSTMOptions,
  FloorModOptions,
  RangeOptions,
  ResizeNearestNeighborOptions,
  LeakyReluOptions,
  SquaredDifferenceOptions,
  MirrorPadOptions,
  AbsOptions,
  SplitVOptions,
  UniqueOptions,
  ReverseV2Options,
  AddNOptions,
  GatherNdOptions,
  CosOptions,
  WhereOptions,
  RankOptions,
  ReverseSequenceOptions,
  MatrixDiagOptions,
  QuantizeOptions,
  MatrixSetDiagOptions,
  HardSwishOptions,
  IfOptions,
  WhileOptions,
  DepthToSpaceOptions,
  NonMaxSuppressionV4Options,
  NonMaxSuppressionV5Options,
  ScatterNdOptions,
  SelectV2Options,
  DensifyOptions,
  SegmentSumOptions,
  BatchMatMulOptions,
  CumsumOptions,
  CallOnceOptions,
  BroadcastToOptions,
  Rfft2dOptions,
  Conv3DOptions,
  HashtableOptions,
  HashtableFindOptions,
  HashtableImportOptions,
  HashtableSizeOptions,
  VarHandleOptions,
  ReadVariableOptions,
  AssignVariableOptions,
  RandomOptions,
  BucketizeOptions,
  GeluOptions,
  DynamicUpdateSliceOptions,
  UnsortedSegmentProdOptions,
  UnsortedSegmentMaxOptions,
  UnsortedSegmentMinOptions,
  UnsortedSegmentSumOptions,
  ATan2Options,
  SignOptions,
  BitcastOptions,
  BitwiseXorOptions,
  RightShiftOptions,
  // DO NOT add new options to this union: doing so breaks Java API
  // generation. Add new builtin options to BuiltinOptions2 instead.
}

union BuiltinOptions2 {
  StablehloConcatenateOptions,
  StablehloBroadcastInDimOptions,
  StablehloSliceOptions,
  StablehloConvolutionOptions,
  StablehloCustomCallOptions,
  StablehloReduceOptions,
  StablehloScatterOptions,
  StablehloCompareOptions,
  StablehloDynamicSliceOptions,
  StablehloPadOptions,
  StablehloIotaOptions,
  StablehloDotGeneralOptions,
  StablehloReduceWindowOptions,
  StablehloSortOptions,
  StablehloWhileOptions,
  StablehloGatherOptions,
  StablehloTransposeOptions,
  DilateOptions,
  StablehloRngBitGeneratorOptions,
  ReduceWindowOptions,
}

table StablehloGatherOptions {
  offset_dims : [long];
  collapsed_slice_dims : [long];
  start_index_map : [long];
  index_vector_dim : long;
  slice_sizes : [long];
  indices_are_sorted : bool;
}

table StablehloTransposeOptions {
  permutation : [long];
}

enum StablehloPrecisionConfig : uint {
  DEFAULT,
  HIGH,
  HIGHEST,
}

table StablehloDotGeneralOptions {
  lhs_batching_dimensions : [long];
  rhs_batching_dimensions : [long];
  lhs_contracting_dimensions : [long];
  rhs_contracting_dimensions : [long];
  precision_config : [StablehloPrecisionConfig];
}

table StablehloReduceWindowOptions {
  window_dimensions : [long];
  window_strides : [long];
  base_dilations : [long];
  window_dilations : [long];
  padding : [long];
  body_subgraph_index : int;
}

table StablehloWhileOptions {
  cond_subgraph_index : int;
  body_subgraph_index : int;
}

table StablehloSortOptions {
  dimension : long;
  is_stable : bool;
  comparator_subgraph_index : int;
}

table StablehloConcatenateOptions {
  dimension : long;
}

table StablehloBroadcastInDimOptions {
  broadcast_dimensions : [long];
}

enum StablehloComparisonDirection : uint {
  STABLEHLO_COMPARISON_DIRECTION_EQ,
  STABLEHLO_COMPARISON_DIRECTION_NE,
  STABLEHLO_COMPARISON_DIRECTION_GE,
  STABLEHLO_COMPARISON_DIRECTION_GT,
  STABLEHLO_COMPARISON_DIRECTION_LE,
  STABLEHLO_COMPARISON_DIRECTION_LT,
}

enum StablehloComparisonType : uint {
  STABLEHLO_COMPARISON_TYPE_NOTYPE,
  STABLEHLO_COMPARISON_TYPE_FLOAT,
  STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER,
  STABLEHLO_COMPARISON_TYPE_SIGNED,
  STABLEHLO_COMPARISON_TYPE_UNSIGNED,
}

table StablehloCompareOptions {
  comparison_direction : StablehloComparisonDirection;
  compare_type : StablehloComparisonType;
}

table StablehloDynamicSliceOptions {
  slice_sizes : [long];
}

table StablehloPadOptions {
  edge_padding_low : [long];
  edge_padding_high : [long];
  interior_padding : [long];
}

table StablehloIotaOptions {
  iota_dimension : long;
}

table StablehloCustomCallOptions {
  call_target_name : string;
  has_side_effect : bool;
  backend_config: string;
  api_version : int; // will be deprecated
  called_computations: [int]; // should point to subgraphs of the computations
  custom_attributes : [ubyte];
}

table StablehloReduceOptions {
  dimensions : [long];
  body_subgraph_index : int;
}

table StablehloSliceOptions {
  start_indices : [long];
  limit_indices : [long];
  strides : [long];
}

table StablehloConvolutionOptions {
  window_strides : [long];
  padding : [long];
  lhs_dilation : [long];
  rhs_dilation : [long];
  window_reversal : [bool];
  input_batch_dimension : long;
  input_feature_dimension : long;
  input_spatial_dimensions : [long];
  kernel_input_feature_dimension : long;
  kernel_output_feature_dimension : long;
  kernel_spatial_dimensions : [long];
  output_batch_dimension : long;
  output_feature_dimension : long;
  output_spatial_dimensions : [long];
  feature_group_count : long;
  batch_group_count : long;
  precision_config : [StablehloPrecisionConfig];
}

table StablehloScatterOptions {
  indices_are_sorted: bool;
  update_window_dims: [long];
  inserted_window_dims: [long];
  scatter_dims_to_operand_dims: [long];
  index_vector_dim: long;
  unique_indices: bool;
  update_computation_subgraph_index: int;
}

enum RngAlgorithm : byte {
  // An algorithm auto-selected by the system according to device type.
  DEFAULT = 0,
  // The Philox algorithm, as described in the paper
  // ['Parallel Random Numbers: As Easy as 1, 2, 3']
  // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
  PHILOX = 1,
  // The ThreeFry algorithm, as described in the paper
  // ['Parallel Random Numbers: As Easy as 1, 2, 3']
  // (https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
  THREEFRY = 2,
}

table StablehloRngBitGeneratorOptions {
  algorithm:RngAlgorithm;
}

// LINT.IfChange
enum Padding : byte { SAME, VALID }
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// LINT.IfChange
enum ActivationFunctionType : byte {
  NONE = 0,
  RELU = 1,
  RELU_N1_TO_1 = 2,
  RELU6 = 3,
  TANH = 4,
  SIGN_BIT = 5,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

table Conv2DOptions {
  padding:Padding;
  stride_w:int;
  stride_h:int;
  fused_activation_function:ActivationFunctionType;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;
  // Parameters for Conv2D version 8 or above.
  // When set, quantized_bias_type defines the dtype for both bias and
  // accumulator.
  quantized_bias_type: TensorType;
}

// Options for both Conv3D and Conv3DTranspose.
table Conv3DOptions {
  padding:Padding;
  stride_d:int;
  stride_w:int;
  stride_h:int;
  fused_activation_function:ActivationFunctionType;
  dilation_d_factor:int = 1;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;
}

table Pool2DOptions {
  padding:Padding;
  stride_w:int;
  stride_h:int;
  filter_width:int;
  filter_height:int;
  fused_activation_function:ActivationFunctionType;
}

table DepthwiseConv2DOptions {
  // Parameters for DepthwiseConv version 1 or above.
  padding:Padding;
  stride_w:int;
  stride_h:int;
  // `depth_multiplier` is redundant. It's used by CPU kernels in
  // TensorFlow 2.0 or below, but ignored in versions above.
  // See comments in lite/c/builtin_op_data.h for more details.
  depth_multiplier:int;
  fused_activation_function:ActivationFunctionType;
  // Parameters for DepthwiseConv version 2 or above.
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;
}

table ConcatEmbeddingsOptions {
  num_channels:int;
  num_columns_per_channel:[int];
  embedding_dim_per_channel:[int]; // This could be inferred from parameters.
}

enum LSHProjectionType: byte {
  UNKNOWN = 0,
  SPARSE = 1,
  DENSE = 2,
}

table LSHProjectionOptions {
  type: LSHProjectionType;
}

table SVDFOptions {
  rank:int;
  fused_activation_function:ActivationFunctionType;
  // For weights-only quantization, use asymmetric quantization for
  // non-constant inputs at evaluation time.
  asymmetric_quantize_inputs:bool;
}

// An implementation of TensorFlow RNNCell.
table RNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;
}

// An implementation of TensorFlow dynamic_rnn with RNNCell.
table SequenceRNNOptions {
  time_major:bool;
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;
}

// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
table BidirectionalSequenceRNNOptions {
  time_major:bool;
  fused_activation_function:ActivationFunctionType;
  merge_outputs: bool;
  asymmetric_quantize_inputs:bool;
}

// LINT.IfChange
enum FullyConnectedOptionsWeightsFormat: byte {
  DEFAULT = 0,
  SHUFFLED4x16INT8 = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
table FullyConnectedOptions {
  // Parameters for FullyConnected version 1 or above.
  fused_activation_function:ActivationFunctionType;

  // Parameters for FullyConnected version 2 or above.
  weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;

  // Parameters for FullyConnected version 5 or above.
  // If set to true, then the number of dimensions is preserved. Furthermore,
  // all but the last dimension of the input and output shapes will be equal.
  keep_num_dims: bool;

  // Parameters for FullyConnected version 7 or above.
  // If set to true, then the weights-only op will use asymmetric quantization
  // for inputs.
  asymmetric_quantize_inputs: bool;

  // Parameters for FullyConnected version 11 or above.
  // When set, quantized_bias_type defines the dtype for both bias and
  // accumulator.
  quantized_bias_type: TensorType;
}

table SoftmaxOptions {
  beta: float;
}

// An implementation of TensorFlow concat.
table ConcatenationOptions {
  axis:int;
  fused_activation_function:ActivationFunctionType;
}

table AddOptions {
  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 3.
  pot_scale_int16:bool = true;
}

table MulOptions {
  fused_activation_function:ActivationFunctionType;
}

table L2NormOptions {
  // This field is currently ignored in the L2 Norm Op.
  fused_activation_function:ActivationFunctionType;
}

table LocalResponseNormalizationOptions {
  radius:int;
  bias:float;
  alpha:float;
  beta:float;
}

// LINT.IfChange
enum LSTMKernelType : byte {
  // Full LSTM kernel which supports peephole and projection.
  FULL = 0,
  // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
  BASIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell.
table LSTMOptions {
  // Parameters for LSTM version 1 or above.
  fused_activation_function:ActivationFunctionType;
  cell_clip: float; // Optional, 0.0 means no clipping
  proj_clip: float; // Optional, 0.0 means no clipping

  // Parameters for LSTM version 2 or above.
  // Basic kernel is only supported in version 2 or above.
  kernel_type: LSTMKernelType = FULL;

  // Parameters for LSTM version 4 or above.
  asymmetric_quantize_inputs: bool;
}

// An implementation of TensorFlow dynamic_rnn with LSTMCell.
table UnidirectionalSequenceLSTMOptions {
  fused_activation_function:ActivationFunctionType;
  cell_clip: float; // Optional, 0.0 means no clipping
  proj_clip: float; // Optional, 0.0 means no clipping

  // If true, then the first dimension is sequence, otherwise batch.
  time_major:bool;

  // Parameter for Unidirectional Sequence LSTM version 3.
  asymmetric_quantize_inputs:bool;

  // Parameter for unidirectional sequence RNN version 4.
  diagonal_recurrent_tensors:bool;
}

table BidirectionalSequenceLSTMOptions {
  // Parameters supported by version 1:
  fused_activation_function:ActivationFunctionType;
  cell_clip: float; // Optional, 0.0 means no clipping
  proj_clip: float; // Optional, 0.0 means no clipping

  // If true, store the outputs of both directions into the first output.
  merge_outputs: bool;

  // Parameters supported by version 2:
  // If true, then the first dimension is sequence, otherwise batch.
  // Version 1 implementations assumed time_major to be true, so this default
  // value should never change.
  time_major: bool = true;

  // Parameters for version 3 or above.
  asymmetric_quantize_inputs:bool;
}

table ResizeBilinearOptions {
  new_height: int (deprecated);
  new_width: int (deprecated);
  align_corners: bool;
  half_pixel_centers: bool;
}

table ResizeNearestNeighborOptions {
  align_corners: bool;
  half_pixel_centers: bool;
}

// Options for a call operation.
table CallOptions {
  // The subgraph index that needs to be called.
  subgraph:uint;
}

table PadOptions {
}

table PadV2Options {
}

table ReshapeOptions {
  new_shape:[int];
}

table SpaceToBatchNDOptions {
}

table BatchToSpaceNDOptions {
}

table SkipGramOptions {
  ngram_size: int;
  max_skip_size: int;
  include_all_ngrams: bool;
}

table SpaceToDepthOptions {
  block_size: int;
}

table DepthToSpaceOptions {
  block_size: int;
}

table SubOptions {
  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 5
  pot_scale_int16:bool = true;
}

table DivOptions {
  fused_activation_function:ActivationFunctionType;
}

table TopKV2Options {
}

enum CombinerType : byte {
  SUM = 0,
  MEAN = 1,
  SQRTN = 2,
}

table EmbeddingLookupSparseOptions {
  combiner:CombinerType;
}

table GatherOptions {
  axis: int;
  // Parameters for Gather version 5 or above.
  batch_dims: int = 0;
}

table TransposeOptions {
}

table ExpOptions {
}

table CosOptions {
}

table ReducerOptions {
  keep_dims: bool;
}

table SqueezeOptions {
  squeeze_dims:[int];
}

table SplitOptions {
  num_splits: int;
}

table SplitVOptions {
  num_splits: int;
}

table StridedSliceOptions {
  begin_mask: int;
  end_mask: int;
  ellipsis_mask: int;
  new_axis_mask: int;
  shrink_axis_mask: int;
  // If true, then the end tensor is an offset of the begin tensor.
  offset: bool;
}
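
// Illustrative comment (an assumption based on TensorFlow's strided_slice
// semantics, which these fields mirror): each mask is a bitfield over input
// dimensions. If bit i of begin_mask is set, begin[i] is ignored and the
// range starts from the beginning of dimension i; end_mask works the same
// way for end[i]. For example, begin = [1, 1], end = [3, 2] with
// begin_mask = 2 (bit 1 set) selects input[1:3, :2].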

table LogSoftmaxOptions {
}

table CastOptions {
  in_data_type: TensorType;
  out_data_type: TensorType;
}

table DequantizeOptions {
}

table MaximumMinimumOptions {
}

table TileOptions {
}

table ArgMaxOptions {
  output_type : TensorType;
}

table ArgMinOptions {
  output_type : TensorType;
}

table GreaterOptions {
}

table GreaterEqualOptions {
}

table LessOptions {
}

table LessEqualOptions {
}

table NegOptions {
}

table SelectOptions {
}

table SliceOptions {
}

table TransposeConvOptions {
  // Parameters supported by version 1, 2, 3:
  padding:Padding;
  stride_w:int;
  stride_h:int;

  // Parameters supported by version 4:
  fused_activation_function:ActivationFunctionType = NONE;

  // Parameters for TransposeConv version 5 or above.
  // When set, quantized_bias_type defines the dtype for both bias and
  // accumulator.
  quantized_bias_type: TensorType;
}

table ExpandDimsOptions {
}

table SparseToDenseOptions {
  validate_indices:bool;
}

table EqualOptions {
}

table NotEqualOptions {
}

table ShapeOptions {
  // Optional output type of the operation (int32 or int64). Defaults to int32.
  out_type : TensorType;
}

table RankOptions {
}

table PowOptions {
}

table FakeQuantOptions {
  // Parameters supported by version 1:
  min:float;
  max:float;
  num_bits:int;

  // Parameters supported by version 2:
  narrow_range:bool;
}

table PackOptions {
  values_count:int;
  axis:int;
}

table LogicalOrOptions {
}

table OneHotOptions {
  axis:int;
}

table AbsOptions {
}

table HardSwishOptions {
}

table LogicalAndOptions {
}

table LogicalNotOptions {
}

table UnpackOptions {
  num:int;
  axis:int;
}

table FloorDivOptions {
}

table SquareOptions {
}

table ZerosLikeOptions {
}

table FillOptions {
}

table FloorModOptions {
}

table RangeOptions {
}

table LeakyReluOptions {
  alpha:float;
}

table SquaredDifferenceOptions {
}

// LINT.IfChange
enum MirrorPadMode : byte {
  // Doesn't include borders.
  REFLECT = 0,
  // Includes borders.
  SYMMETRIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

table MirrorPadOptions {
  mode:MirrorPadMode;
}

table UniqueOptions {
  idx_out_type:TensorType = INT32;
}

table ReverseV2Options {
}

table AddNOptions {
}

table GatherNdOptions {
}

table WhereOptions {
}

table ReverseSequenceOptions {
  seq_dim:int;
  batch_dim:int = 0;
}

table MatrixDiagOptions {
}

table QuantizeOptions {
}

table MatrixSetDiagOptions {
}

table IfOptions {
  then_subgraph_index:int;
  else_subgraph_index:int;
}

table CallOnceOptions {
  init_subgraph_index:int;
}

table WhileOptions {
  cond_subgraph_index:int;
  body_subgraph_index:int;
}

table NonMaxSuppressionV4Options {
}

table NonMaxSuppressionV5Options {
}

table ScatterNdOptions {
}

table SelectV2Options {
}

table DensifyOptions {
}

table SegmentSumOptions {
}

table BatchMatMulOptions {
  adj_x:bool;
  adj_y:bool;
  // Parameters for BatchMatMul version 4 or above.
  // If set to true, then the weights-only op will use asymmetric quantization
  // for inputs.
  asymmetric_quantize_inputs: bool;
}

table CumsumOptions {
  exclusive:bool;
  reverse:bool;
}

table BroadcastToOptions {
}

table Rfft2dOptions {
}

table HashtableOptions {
  // The identity of hash tables. This identity will be used across different
  // subgraphs in the same interpreter instance.
  table_id:int;
  key_dtype:TensorType;
  value_dtype:TensorType;
}

table HashtableFindOptions {
}

table HashtableImportOptions {
}

table HashtableSizeOptions {
}

table VarHandleOptions {
  container:string;
  shared_name:string;
}

table ReadVariableOptions {
}

table AssignVariableOptions {
}

table RandomOptions {
  seed: long;
  seed2: long;
}

table BucketizeOptions {
  boundaries: [float];  // The bucket boundaries.
}

table GeluOptions {
  approximate: bool;
}

table DynamicUpdateSliceOptions {
}

table UnsortedSegmentProdOptions {
}

table UnsortedSegmentMaxOptions {
}

table UnsortedSegmentSumOptions {
}

table ATan2Options {
}

table UnsortedSegmentMinOptions {
}

table SignOptions {
}

table BitcastOptions {
}

table BitwiseXorOptions {
}

table RightShiftOptions {
}

table DilateOptions {
}

enum ReduceWindowFunction : int {
  UNSUPPORTED,
  ADD,
  MUL,
  MINIMUM,
  MAXIMUM,
  ALL,
  ANY,
}

table ReduceWindowOptions {
  reduce_function: ReduceWindowFunction;
}

// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
  // This field is for backward compatibility. This field will be used when
  // the value of the extended builtin_code field is less than
  // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  deprecated_builtin_code:byte;
  custom_code:string;

  // The version of the operator. The version needs to be bumped whenever new
  // parameters are introduced into an op.
  version:int = 1;

  // This field is introduced for resolving op builtin code shortage problem
  // (the original BuiltinOperator enum field was represented as a byte).
  // This field will be used when the value of the extended builtin_code field
  // is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  // (A resolution sketch follows this table.)
  builtin_code:BuiltinOperator;
}
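
// Illustrative comment (a sketch of the convention above, not normative): a
// reader can resolve the effective operator code by taking the larger of the
// two fields, e.g. in Python:
//
//   def get_builtin_code(op_code):
//       return max(op_code.builtin_code, op_code.deprecated_builtin_code)
//
// since codes that fit in a byte may appear in either field, while larger
// codes appear only in builtin_code.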

enum CustomOptionsFormat : byte {
  FLEXBUFFERS = 0,
}

// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operation are configured using builtin_options
// or custom_options.
table Operator {
  // Index into the operator_codes array. Using an integer here avoids
  // complicated map lookups.
  opcode_index:uint;

  // Optional inputs are indicated by -1.
  inputs:[int];
  outputs:[int];

  builtin_options:BuiltinOptions;
  custom_options:[ubyte];
  custom_options_format:CustomOptionsFormat;

  // A list of booleans indicating the input tensors which are being mutated by
  // this operator (e.g. used by RNN and LSTM).
  // For example, if the "inputs" array refers to 5 tensors and the second and
  // fifth are mutable variables, then this list will contain
  // [false, true, false, false, true].
  //
  // If the list is empty, no variable is mutated in this operator.
  // The list either has the same length as `inputs`, or is empty.
  mutating_variable_inputs:[bool];

  // A list of indices to the subgraph's "tensors" that are internal to an Op.
  // Internal tensors are those that do not flow in or out of the operation,
  // but instead are part of internal computation. As such, the operation's
  // implementation may manage its memory more efficiently. They are needed
  // however (i.e. not just an implementation detail) since they are part of the
  // computation, which may require relevant metadata such as quantization
  // parameters.
  intermediates:[int];

  // When an op uses custom_options in a model that is larger than 2GB, the
  // following attributes are used instead to locate the buffer, which is
  // stored outside of the flatbuffer. The offset is calculated relative to
  // the beginning of the file and is only valid if > 1.
  large_custom_options_offset: ulong;
  large_custom_options_size: ulong;

  // Flatbuffers unions have a 128-element limit in Java, so a second union
  // is added. If BuiltinOptions2 also runs out, a third one can be added.
  builtin_options_2 : BuiltinOptions2;
}
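
// Illustrative comment (hypothetical example): an ADD operator whose
// OperatorCode sits at operator_codes[0], adding tensors 0 and 1 into tensor
// 2, might look like this in flatc's JSON text form (the field spellings
// follow flatc's union conventions and are an assumption here):
//
//   { "opcode_index": 0,
//     "inputs": [0, 1],
//     "outputs": [2],
//     "builtin_options_type": "AddOptions",
//     "builtin_options": { "fused_activation_function": "NONE" } }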

// The root type, defining a subgraph, which typically represents an entire
// model.
table SubGraph {
  // A list of all tensors used in this subgraph.
  tensors:[Tensor];

  // Indices of the tensors that are inputs into this subgraph. Note this is
  // the list of non-static tensors that feed into the subgraph for inference.
  inputs:[int];

  // Indices of the tensors that are outputs out of this subgraph. Note this is
  // the list of output tensors that are considered the product of the
  // subgraph's inference.
  outputs:[int];

  // All operators, in execution order.
  operators:[Operator];

  // Name of this subgraph (used for debugging).
  name:string;
}

// Table of raw data buffers (used for constant tensors). Referenced by tensors
// by index. The generous alignment accommodates mmap-friendly data structures.
table Buffer {
  data:[ubyte] (force_align: 16);

  // In a model that is larger than 2GB, buffers instead use the following
  // attributes to locate stored data, which lives outside of the flatbuffer.
  // The offset is calculated relative to the beginning of the file and is
  // only valid if > 1. (A reading sketch follows this table.)
  offset: ulong;
  size: ulong;
}
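
// Illustrative comment (a sketch under the convention above, not normative):
// a reader might fetch a buffer's bytes like this Python-style pseudocode,
// where `file_bytes` is the whole .tflite file read into memory:
//
//   def buffer_bytes(file_bytes, buf):
//       if buf.offset > 1:  # data stored outside the flatbuffer
//           return file_bytes[buf.offset : buf.offset + buf.size]
//       return buf.data     # inline data (may be empty)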

table Metadata {
  // A human readable string to uniquely identify a Metadata.
  name:string;
  // An index to the buffers table.
  buffer:uint;
}

// Map from an alias name of a tensor to its index in the graph.
// This is used in SignatureDef.
table TensorMap {
  // Represents the alias to use for this tensor.
  name:string;

  // The actual tensor index in the primary graph that 'name' corresponds to.
  tensor_index:uint;
}

// This corresponds to SignatureDef in TensorFlow SavedModel.
// The SignatureDef will be part of the SavedModel provided for conversion.
table SignatureDef {
  // Named inputs for this signature.
  inputs:[TensorMap];

  // Named outputs for this signature.
  outputs:[TensorMap];

  // Key value which was in the TensorFlow SavedModel SignatureDef map.
  signature_key:string;

  // Model tag, deprecated.
  deprecated_tag:string (deprecated);

  // Index of the subgraph that corresponds to the exported method.
  subgraph_index:uint;
}
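
// Illustrative comment (hypothetical example): a signature keyed
// "serving_default" that maps input alias "image" to tensor 0 and output
// alias "scores" to tensor 5 of the main subgraph would carry:
//   inputs  = [ { name: "image",  tensor_index: 0 } ]
//   outputs = [ { name: "scores", tensor_index: 5 } ]
//   signature_key = "serving_default", subgraph_index = 0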

table Model {
  // Version of the schema.
  version:uint;

  // A list of all operator codes used in this model. This is
  // kept in order because operators carry an index into this
  // vector.
  operator_codes:[OperatorCode];

  // All the subgraphs of the model. The 0th is assumed to be the main
  // model.
  subgraphs:[SubGraph];

  // A description of the model.
  description:string;

  // Buffers of the model.
  // Note the 0th entry of this array must be an empty buffer (sentinel).
  // This is a convention so that tensors without a buffer can provide 0 as
  // their buffer.
  buffers:[Buffer];

  // Metadata about the model. Indirects into the existing buffers list.
  // Deprecated, prefer to use the metadata field.
  metadata_buffer:[int];

  // Metadata about the model.
  metadata:[Metadata];

  // Optional SignatureDefs for the model.
  signature_defs:[SignatureDef];
}

root_type Model;
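
// Usage note (an assumption that the FlatBuffers compiler `flatc` is
// available; not part of the schema): a .tflite file can be inspected against
// this schema by converting it to JSON, and language bindings can be
// generated from it, e.g.:
//
//   flatc --json --strict-json tflite-schema-v2.15.0.fbs -- model.tflite
//   flatc --python tflite-schema-v2.15.0.fbs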