// Copyright 2020 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

namespace tflite;

// TFLite metadata contains both human readable and machine readable information
// about what the model does and how to use the model. It can be used as a
// README file, which elaborates the details of the model, each input/output
// tensor, and each associated file.
//
// An important use case of TFLite metadata is the TFLite codegen tool, which
// automatically generates the model interface based on the properties of the
// model and the tensors. The model interface provides high-level APIs to
// interact with the model, such as preprocessing the input data and running
// inferences.
//
// Entries marked with "<Codegen usage>" are used in the TFLite codegen tool to
// generate the model interface. It is recommended to fill in at least those
// entries to boost the codegen performance.

// The Metadata schema is versioned by the Semantic versioning number, such as
// MAJOR.MINOR.PATCH. It tracks the schema changes according to the rules below:
//  * Bump up the MAJOR number when making potentially backwards incompatible
//    changes. It must be incremented if the new changes break backwards
//    compatibility. It may also include minor and patch level changes as
//    needed. The true backwards compatibility is indicated by the file
//    identifier.
//  * Bump up the MINOR number when making backwards compatible updates for
//    major features, such as supporting new content types or adding new
//    processing units.
//  * Bump up the PATCH number when making small backwards compatible changes,
//    such as adding new fields or deprecating certain fields (not deleting
//    them).
//
// ModelMetadata.min_parser_version indicates the minimum necessary metadata
// parser version to fully understand all fields in a given metadata flatbuffer.
//
// New fields and types will have associated comments with the schema version
// for which they were added.
//
// LINT.IfChange
// Schema Semantic version: 1.4.1
// LINT.ThenChange(//tensorflow_lite_support/\
//     metadata/java/src/java/org/tensorflow/lite/support/metadata/\
//     MetadataParser.java)

// This indicates the flatbuffer compatibility. The number will bump up when a
// breaking change is applied to the schema, such as removing fields or adding
// new fields to the middle of a table.
file_identifier "M001";

// History:
// 1.0.1 - Added VOCABULARY type to AssociatedFileType.
// 1.1.0 - Added BertTokenizerOptions to ProcessUnitOptions.
//         Added SentencePieceTokenizerOptions to ProcessUnitOptions.
//         Added input_process_units to SubGraphMetadata.
//         Added output_process_units to SubGraphMetadata.
// 1.2.0 - Added input_tensor_group to SubGraphMetadata.
//         Added output_tensor_group to SubGraphMetadata.
// 1.2.1 - Added RegexTokenizerOptions to ProcessUnitOptions.
// 1.3.0 - Added AudioProperties to ContentProperties.
// 1.4.0 - Added SCANN_INDEX_FILE type to AssociatedFileType.
// 1.4.1 - Added version to AssociatedFile.

// File extension of any written files.
file_extension "tflitemeta";
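
// For illustration, here is a minimal sketch of reading this metadata back out
// of a model with the Python `tflite_support` package ("model.tflite" is a
// hypothetical model path):
//
//   from tflite_support import metadata as _metadata
//
//   # Load the metadata packed into a TFLite model file.
//   displayer = _metadata.MetadataDisplayer.with_model_file("model.tflite")
//   # Dump the metadata flatbuffer as JSON and list the packed files.
//   print(displayer.get_metadata_json())
//   print(displayer.get_packed_associated_file_list())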

// LINT.IfChange
enum AssociatedFileType : byte {
  UNKNOWN = 0,

  // Files such as readme.txt.
  DESCRIPTIONS = 1,

  // Contains a list of labels (characters separated by "\n" or in lines) that
  // annotate a certain axis of the tensor. For example,
  // the label file in image classification. Those labels annotate the
  // output tensor, such that each value in the output tensor is the
  // probability of the corresponding category specified by the label. See the
  // example label file used in image classification [1].
  //
  // <Codegen usage>:
  // If an output tensor has an associated file as TENSOR_AXIS_LABELS, return
  // the output as a mapping between the labels and probability in the model
  // interface.
  // If multiple files of the same type are present, the first one is used by
  // default; additional ones are to be distinguished from one another by their
  // specified locale.
  //
  // [1]: https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/labels.txt
  TENSOR_AXIS_LABELS = 2,

  // Contains a list of labels (characters separated by "\n" or in lines) that
  // tensor values correspond to. For example, in
  // the object detection model, one of the output tensors is the detected
  // classes, and each value in the tensor refers to the index of a label in the
  // category label file. See the example label file used in object detection
  // [1].
  //
  // <Codegen usage>:
  // If an output tensor has an associated file as TENSOR_VALUE_LABELS, convert
  // the tensor values into labels, and return a list of strings as the output.
  // If multiple files of the same type are present, the first one is used by
  // default; additional ones are to be distinguished from one another by their
  // specified locale.
  //
  // [1]: https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/tests/testdata/object_detector/labelmap.txt
  TENSOR_VALUE_LABELS = 3,

  // Contains sigmoid-based score calibration parameters, formatted as CSV.
  // Each line contains, for the corresponding index of an output tensor, the
  // scale, slope, offset and (optional) min_score parameters to be used for
  // sigmoid fitting (in this order and in `strtof`-compatible [1] format).
  // Scale should be a non-negative value.
  // A line may be left empty to default calibrated scores for this index to
  // default_score.
  // In summary, each line should thus contain 0, 3 or 4 comma-separated values.
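  //
  // For example, a file for a model with three output indices might read as
  // follows (hypothetical values): line 1 holds scale, slope, offset and
  // min_score for index 0; line 2 is empty, so index 1 falls back to
  // default_score; line 3 holds scale, slope and offset for index 2:
  //   1.0,2.0,0.0,0.1
  //
  //   1.0,1.5,-0.5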
  //
  // See the example score calibration file used in image classification [2].
  //
  // See documentation for ScoreCalibrationOptions for details.
  //
  // [1]: https://en.cppreference.com/w/c/string/byte/strtof
  // [2]: https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/score_calibration.txt
  TENSOR_AXIS_SCORE_CALIBRATION = 4,

  // Contains a list of unique words (characters separated by "\n" or in lines)
  // that help to convert natural language words to embedding vectors.
  //
  // See the example vocab file used in text classification [1].
  //
  // [1]: https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/tests/testdata/nl_classifier/vocab.txt
  // Added in: 1.0.1
  VOCABULARY = 5,

  // TODO(b/222351186): introduce the ScaNN index file with links once the code
  // is released.
  // Contains an on-device ScaNN index file in LevelDB format.
  // Added in: 1.4.0
  SCANN_INDEX_FILE = 6,
}

table AssociatedFile {
  // Name of this file. It needs to be exactly the same as the name of the
  // actual file packed into the TFLite model as a zip file.
  //
  // <Codegen usage>:
  // Locates the actual file in the TFLite model.
  name:string;

  // A description of what the file is.
  description:string;

  // Type of the associated file. There may be special pre/post processing for
  // some types. For example in image classification, a label file of the output
  // will be used to convert object indices into strings.
  //
  // <Codegen usage>:
  // Determines how to process the corresponding tensor.
  type:AssociatedFileType;

  // An optional locale for this associated file (if applicable). It is
  // recommended to use an ISO 639-1 letter code (e.g. "en" for English),
  // optionally completed by a two letter region code (e.g. "en-US" for US
  // English and "en-CA" for Canadian English).
  // Leverage this in order to specify e.g. multiple label files translated into
  // different languages.
  locale:string;

  // Version of the file specified by model creators.
  // Added in: 1.4.1
  version:string;
}
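
// As a sketch of how such an entry is typically constructed with the Python
// `tflite_support` package ("labels.txt" is a hypothetical file name):
//
//   from tflite_support import metadata_schema_py_generated as _metadata_fb
//
//   label_file = _metadata_fb.AssociatedFileT()
//   label_file.name = "labels.txt"
//   label_file.description = "Labels for the categories the model recognizes."
//   label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
//   label_file.locale = "en"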

// The basic content type for all tensors.
//
// <Codegen usage>:
// Input feature tensors:
// 1. Generates the method to load data from a TensorBuffer.
// 2. Creates the preprocessing logic. The default processing pipeline is:
// [NormalizeOp, QuantizeOp].
// Output feature tensors:
// 1. Generates the method to return the output data to a TensorBuffer.
// 2. Creates the post-processing logic. The default processing pipeline is:
// [DeQuantizeOp].
table FeatureProperties {
}

// The type of color space of an image.
enum ColorSpaceType : byte {
  UNKNOWN = 0,
  RGB = 1,
  GRAYSCALE = 2,
}

table ImageSize {
  width:uint;
  height:uint;
}

// The properties for image tensors.
//
// <Codegen usage>:
// Input image tensors:
// 1. Generates the method to load an image from a TensorImage.
// 2. Creates the preprocessing logic. The default processing pipeline is:
// [ResizeOp, NormalizeOp, QuantizeOp].
// Output image tensors:
// 1. Generates the method to return the output data to a TensorImage.
// 2. Creates the post-processing logic. The default processing pipeline is:
// [DeQuantizeOp].
table ImageProperties {
  // The color space of the image.
  //
  // <Codegen usage>:
  // Determines how to convert the color space of a given image from users.
  color_space:ColorSpaceType;

  // Indicates the default value of image width and height if the tensor shape
  // is dynamic. For a fixed-size tensor, this size will be consistent with the
  // expected size.
  default_size:ImageSize;
}

// The properties for tensors representing bounding boxes.
//
// <Codegen usage>:
// Input image tensors: NA.
// Output image tensors: parses the values into a data structure that represents
// bounding boxes. For example, in the generated wrapper for Android, it returns
// the output as android.graphics.Rect objects.
enum BoundingBoxType : byte {
  UNKNOWN = 0,
  // Represents the bounding box by using the combination of boundaries,
  // {left, top, right, bottom}.
  // The default order is {left, top, right, bottom}. Other orders can be
  // indicated by BoundingBoxProperties.index.
  BOUNDARIES = 1,

  // Represents the bounding box by using the upper_left corner, width and
  // height.
  // The default order is {upper_left_x, upper_left_y, width, height}. Other
  // orders can be indicated by BoundingBoxProperties.index.
  UPPER_LEFT = 2,

  // Represents the bounding box by using the center of the box, width and
  // height. The default order is {center_x, center_y, width, height}. Other
  // orders can be indicated by BoundingBoxProperties.index.
  CENTER = 3,
}

// The properties for audio tensors.
// Added in: 1.3.0
table AudioProperties {
  // The sample rate in Hz when the audio was captured.
  sample_rate:uint;

  // The channel count of the audio.
  channels:uint;
}

enum CoordinateType : byte {
  // The coordinates are float values from 0 to 1.
  RATIO = 0,
  // The coordinates are integers.
  PIXEL = 1,
}

table BoundingBoxProperties {
  // Denotes the order of the elements defined in each bounding box type. An
  // empty index array represents the default order of each bounding box type.
  // For example, to denote the default order of BOUNDARIES, {left, top, right,
  // bottom}, the index should be {0, 1, 2, 3}. To denote the order {left,
  // right, top, bottom}, the index should be {0, 2, 1, 3}.
  //
  // The index array can be applied to all bounding box types to adjust the
  // order of their corresponding underlying elements.
  //
  // <Codegen usage>:
  // Indicates how to parse the bounding box values.
  index:[uint];

  // <Codegen usage>:
  // Indicates how to parse the bounding box values.
  type:BoundingBoxType;

  // <Codegen usage>:
  // Indicates how to convert the bounding box back to the original image in
  // pixels.
  coordinate_type:CoordinateType;
}

union ContentProperties {
  FeatureProperties,
  ImageProperties,
  BoundingBoxProperties,
  // Added in: 1.3.0
  AudioProperties,
}

table ValueRange {
  min:int;
  max:int;
}

table Content {
  // The properties that the content may have, indicating the type of the
  // Content.
  //
  // <Codegen usage>:
  // Indicates how to process the tensor.
  content_properties:ContentProperties;

  // The range of dimensions that the content corresponds to. A NULL
  // "range" indicates that the content uses up all dimensions,
  // except the batch axis if applied.
  //
  // Here are all the possible situations of how a tensor is composed.
  // Case 1: The tensor is a single object, such as an image.
  // For example, the input of an image classifier
  // (https://www.tensorflow.org/lite/models/image_classification/overview),
  // a tensor of shape [1, 224, 224, 3]. Dimensions 1 to 3 correspond to the
  // image. Since dimension 0 is a batch axis, which can be ignored,
  // "range" can be left as NULL.
  //
  // Case 2: The tensor contains multiple instances of the same object.
  // For example, the output tensor of detected bounding boxes of an object
  // detection model
  // (https://www.tensorflow.org/lite/models/object_detection/overview).
  // The tensor shape is [1, 10, 4]. Here is what the three dimensions
  // represent:
  // dimension 0: the batch axis.
  // dimension 1: the 10 objects detected with the highest confidence.
  // dimension 2: the bounding boxes of the 10 detected objects.
  // The tensor is essentially 10 bounding boxes. In this case,
  // "range" should be {min=2; max=2;}.
  //
  // The output tensor of scores of the above object detection model has shape
  // [1, 10], where
  // dimension 0: the batch axis;
  // dimension 1: the scores of the 10 detected objects.
  // Set "range" to the number of dimensions, which is {min=2; max=2;}, to
  // denote that every element in the tensor is an individual content object,
  // i.e. a score in this example.
  //
  // Another example is the pose estimation model
  // (https://www.tensorflow.org/lite/models/pose_estimation/overview).
  // The output tensor of heatmaps is in the shape of [1, 9, 9, 17].
  // Here is what the four dimensions represent:
  // dimension 0: the batch axis.
  // dimension 1/2: the heatmap image.
  // dimension 3: 17 body parts of a person.
  // Even though the last axis is the body part, the real content of this
  // tensor is the heatmap. "range" should be {min=1; max=2;}.
  //
  // Case 3: The tensor contains multiple different objects. (Not supported by
  // Content at this point).
  // Sometimes a tensor may contain multiple different objects, thus different
  // contents. This is very common for regression models. For example, a model
  // to predict fuel efficiency
  // (https://www.tensorflow.org/tutorials/keras/regression).
  // The input tensor has shape [1, 9], consisting of 9 features, such as
  // "Cylinders", "Displacement", "Weight", etc. In this case, dimension 1
  // contains 9 different contents. However, since these sub-dimension objects
  // barely need to be specifically processed, their contents are not recorded
  // in the metadata. Still, the name of each dimension can be set through
  // TensorMetadata.dimension_names.
  //
  // Note that if it is not case 3, a tensor can only have one content type.
  //
  // <Codegen usage>:
  // Case 1: return a processed single object of a certain content type.
  // Case 2: return a list of processed objects of a certain content type. The
  // generated model interface has APIs to randomly access those objects from
  // the output.
  range:ValueRange;
}
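
// To make the Content plumbing concrete, here is a minimal sketch of filling
// in ImageProperties for an input tensor with the Python `tflite_support`
// package, leaving "range" as NULL, i.e. Case 1 above:
//
//   from tflite_support import metadata_schema_py_generated as _metadata_fb
//
//   input_meta = _metadata_fb.TensorMetadataT()
//   input_meta.content = _metadata_fb.ContentT()
//   input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
//   input_meta.content.contentProperties.colorSpace = (
//       _metadata_fb.ColorSpaceType.RGB)
//   input_meta.content.contentPropertiesType = (
//       _metadata_fb.ContentProperties.ImageProperties)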

// Parameters that are used when normalizing the tensor.
table NormalizationOptions {
  // mean and std are normalization parameters. Tensor values are normalized
  // on a per-channel basis, by the formula
  //   (x - mean) / std.
  // If there is only one value in mean or std, we'll propagate the value to
  // all channels.
  //
  // Quantized models share the same normalization parameters as their
  // corresponding float models. For example, an image input tensor may have
  // the normalization parameters of
  //   mean = 127.5f and std = 127.5f.
  // The image value will be normalized from [0, 255] to [-1, 1].
  // Then, for quantized models, the image data should be further quantized
  // according to the quantization parameters. In the case of uint8, the image
  // data will be scaled back to [0, 255], while for int8, the image data will
  // be scaled to [-128, 127].
  //
  // Both the normalization parameters and quantization parameters can be
  // retrieved through the metadata extractor library.
  // TODO(b/156644598): add link for the metadata extractor library.

  // Per-channel mean of the possible values used in normalization.
  //
  // <Codegen usage>:
  // Apply normalization to input tensors accordingly.
  mean:[float];

  // Per-channel standard dev. of the possible values used in normalization.
  //
  // <Codegen usage>:
  // Apply normalization to input tensors accordingly.
  std:[float];
}
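
// As a sketch, these parameters can be attached to a tensor as a process unit
// with the Python `tflite_support` package (mean/std of 127.5 as in the
// example above; input_meta is a hypothetical TensorMetadataT):
//
//   from tflite_support import metadata_schema_py_generated as _metadata_fb
//
//   input_normalization = _metadata_fb.ProcessUnitT()
//   input_normalization.optionsType = (
//       _metadata_fb.ProcessUnitOptions.NormalizationOptions)
//   input_normalization.options = _metadata_fb.NormalizationOptionsT()
//   input_normalization.options.mean = [127.5]
//   input_normalization.options.std = [127.5]
//   input_meta.processUnits = [input_normalization]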

// The different possible score transforms to apply to uncalibrated scores
// before applying score calibration.
enum ScoreTransformationType : byte {
  // Identity function: g(x) = x.
  IDENTITY = 0,
  // Log function: g(x) = log(x).
  LOG = 1,
  // Inverse logistic function: g(x) = log(x) - log(1-x).
  INVERSE_LOGISTIC = 2,
}

// Options to perform score calibration on an output tensor through sigmoid
// functions. One of the main purposes of score calibration is to make scores
// across classes comparable, so that a common threshold can be used for all
// output classes. This is meant for models producing class predictions as
// output, e.g. image classification or detection models.
//
// For each index in the output tensor, this applies:
// * `f(x) = scale / (1 + e^-(slope*g(x)+offset))` if `x > min_score` or if no
//   `min_score` has been specified,
// * `f(x) = default_score` otherwise or if no scale, slope and offset have been
//   specified.
// Where:
// * scale, slope, offset and (optional) min_score are index-specific parameters
// * g(x) is an index-independent transform among those defined in
//   ScoreTransformationType
// * default_score is an index-independent parameter.
// An AssociatedFile with type TENSOR_AXIS_SCORE_CALIBRATION specifying the
// index-specific parameters must be associated with the corresponding
// TensorMetadata for score calibration to be applied.
//
// See the example score calibration file used in image classification [1].
// [1]: https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/metadata/python/tests/testdata/image_classifier/score_calibration.txt
table ScoreCalibrationOptions {
  // The function to use for transforming the uncalibrated score before
  // applying score calibration.
  score_transformation:ScoreTransformationType;

  // The default calibrated score to apply if the uncalibrated score is
  // below min_score or if no parameters were specified for a given index.
  default_score:float;
}
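
// As a worked example (hypothetical parameters): with the IDENTITY transform,
// scale = 1.0, slope = 2.0, offset = 0.0 and min_score = 0.1, an uncalibrated
// score x = 0.5 yields
//   f(0.5) = 1.0 / (1 + e^-(2.0*0.5 + 0.0)) = 1 / (1 + e^-1) ~= 0.731,
// while x = 0.05 falls below min_score and is mapped to default_score.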

// Performs thresholding on output tensor values, in order to filter out
// low-confidence results.
table ScoreThresholdingOptions {
  // The recommended global threshold below which results are considered
  // low-confidence and should be filtered out.
  global_score_threshold:float;
}

// Performs Bert tokenization as in tf.text.BertTokenizer
// (https://github.com/tensorflow/text/blob/3599f6fcd2b780a2dc413b90fb9315464f10b314/docs/api_docs/python/text/BertTokenizer.md)
// Added in: 1.1.0
table BertTokenizerOptions {
  // The vocabulary files used in the BertTokenizer.
  vocab_file:[AssociatedFile];
}

// Performs SentencePiece tokenization as in tf.text.SentencepieceTokenizer
// (https://github.com/tensorflow/text/blob/3599f6fcd2b780a2dc413b90fb9315464f10b314/docs/api_docs/python/text/SentencepieceTokenizer.md).
// Added in: 1.1.0
table SentencePieceTokenizerOptions {
  // The SentencePiece model files used in the SentencePieceTokenizer.
  sentencePiece_model:[AssociatedFile];

  // The optional vocabulary model files used in the SentencePieceTokenizer.
  vocab_file:[AssociatedFile];
}

// Splits strings by the occurrences of delim_regex_pattern and converts the
// tokens into ids. For example, given
//   delim_regex_pattern: "\W+",
//   string: "Words, words, words.",
// the tokens after the split are: "Words", "words", "words", "".
// The tokens can then be converted into ids according to the vocab_file.
// Added in: 1.2.1
table RegexTokenizerOptions {
  delim_regex_pattern:string;
  // The vocabulary files used to convert the tokens into ids.
  vocab_file:[AssociatedFile];
}
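
// The splitting step behaves roughly like Python's re.split, as in this
// minimal sketch:
//
//   import re
//
//   # Splitting on non-word characters reproduces the example above.
//   tokens = re.split(r"\W+", "Words, words, words.")
//   assert tokens == ["Words", "words", "words", ""]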

// Options that are used when processing the tensor.
union ProcessUnitOptions {
  NormalizationOptions,
  ScoreCalibrationOptions,
  ScoreThresholdingOptions,
  // Added in: 1.1.0
  BertTokenizerOptions,
  // Added in: 1.1.0
  SentencePieceTokenizerOptions,
  // Added in: 1.2.1
  RegexTokenizerOptions
}

// A process unit that is used to process the tensor out-of-graph.
table ProcessUnit {
  options:ProcessUnitOptions;
}

// Statistics to describe a tensor.
table Stats {
  // Max and min are not currently used in tflite.support codegen. They mainly
  // serve as references for users to better understand the model. They can also
  // be used to validate model pre/post processing results.
  // If there is only one value in max or min, we'll propagate the value to
  // all channels.

  // Per-channel maximum value of the tensor.
  max:[float];

  // Per-channel minimum value of the tensor.
  min:[float];
}

// Metadata of a group of tensors. It may contain several tensors that will be
// grouped together in codegen. For example, the TFLite object detection model
// example (https://www.tensorflow.org/lite/models/object_detection/overview)
// has four outputs: classes, scores, bounding boxes, and number of detections.
// If the four outputs are bundled together using TensorGroup (for example,
// named as "detection result"), the codegen tool will generate the class,
// `DetectionResult`, which contains the class, score, and bounding box. And the
// outputs of the model will be converted to a list of `DetectionResult` objects
// and the number of detections. Note that the number of detections is a single
// number, and is therefore inappropriate for the list of `DetectionResult`.
// Added in: 1.2.0
table TensorGroup {
  // Name of tensor group.
  //
  // <codegen usage>:
  // Name of the joint class of the tensor group.
  name:string;

  // Names of the tensors to group together, corresponding to
  // TensorMetadata.name.
  //
  // <codegen usage>:
  // Determines which tensors will be added to this group. All tensors in the
  // group should have the same number of elements specified by Content.range.
  tensor_names:[string];
}

// Detailed information of an input or output tensor.
table TensorMetadata {
  // Name of the tensor.
  //
  // <Codegen usage>:
  // The name of this tensor in the generated model interface.
  name:string;

  // A description of the tensor.
  description:string;

  // A list of names of the dimensions in this tensor. The length of
  // dimension_names needs to match the number of dimensions in this tensor.
  //
  // <Codegen usage>:
  // The name of each dimension in the generated model interface. See "Case 2"
  // in the comments of Content.range.
  dimension_names:[string];

  // The content that represents this tensor.
  //
  // <Codegen usage>:
  // Determines how to process this tensor. See each item in ContentProperties
  // for the default process units that will be applied to the tensor.
  content:Content;

  // The process units that are used to process the tensor out-of-graph.
  //
  // <Codegen usage>:
  // Contains the parameters of the default processing pipeline for each content
  // type, such as the normalization parameters in all content types. See the
  // items under ContentProperties for the details of the default processing
  // pipeline.
  process_units:[ProcessUnit];

  // The statistics of the tensor values.
  stats:Stats;

  // A list of associated files of this tensor.
  //
  // <Codegen usage>:
  // Contains processing parameters of this tensor, such as normalization.
  associated_files:[AssociatedFile];
}
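
// As a sketch, a classification output tensor described with the Python
// `tflite_support` package, reusing the hypothetical label_file entry from the
// AssociatedFile example above:
//
//   from tflite_support import metadata_schema_py_generated as _metadata_fb
//
//   output_meta = _metadata_fb.TensorMetadataT()
//   output_meta.name = "probability"
//   output_meta.description = "Probabilities of the labels."
//   output_meta.content = _metadata_fb.ContentT()
//   output_meta.content.contentPropertiesType = (
//       _metadata_fb.ContentProperties.FeatureProperties)
//   output_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
//   output_stats = _metadata_fb.StatsT()
//   output_stats.max = [1.0]
//   output_stats.min = [0.0]
//   output_meta.stats = output_stats
//   output_meta.associatedFiles = [label_file]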

table SubGraphMetadata {
  // Name of the subgraph.
  //
  // Note that, since TFLite only supports one subgraph at this moment, the
  // Codegen tool will use the name in ModelMetadata in the generated model
  // interface.
  name:string;

  // A description that explains details about what the subgraph does.
  description:string;

  // Metadata of all input tensors used in this subgraph. It matches exactly
  // the input tensors specified by `SubGraph.inputs` in the TFLite
  // schema.fbs file[2]. The number of `TensorMetadata` in the array should
  // equal the number of indices in `SubGraph.inputs`.
  //
  // [2]: tensorflow/lite/schema/schema.fbs
  // <Codegen usage>:
  // Determines how to process the inputs.
  input_tensor_metadata:[TensorMetadata];

  // Metadata of all output tensors used in this subgraph. It matches exactly
  // the output tensors specified by `SubGraph.outputs` in the TFLite
  // schema.fbs file[2]. The number of `TensorMetadata` in the array should
  // equal the number of indices in `SubGraph.outputs`.
  //
  // <Codegen usage>:
  // Determines how to process the outputs.
  output_tensor_metadata:[TensorMetadata];

  // A list of associated files of this subgraph.
  associated_files:[AssociatedFile];

  // Input process units of the subgraph. Some models may have complex pre and
  // post processing logic where the process units do not work on one tensor at
  // a time, but in a way similar to a TFLite graph. For example, in the
  // MobileBert model (https://www.tensorflow.org/lite/models/bert_qa/overview),
  // the inputs are: ids / mask / segment ids;
  // the outputs are: end logits / start logits.
  // The preprocessing converts the query string and the context string to the
  // model inputs, and the post-processing converts the model outputs to the
  // answer string.
  // Added in: 1.1.0
  input_process_units:[ProcessUnit];

  // Output process units of the subgraph.
  // Added in: 1.1.0
  output_process_units:[ProcessUnit];

  // Metadata of all input tensor groups used in this subgraph.
  //
  // <codegen usage>:
  // Bundles the corresponding elements of the underlying input tensors together
  // into a class, and converts those individual tensors into a list of the
  // class objects.
  // Added in: 1.2.0
  input_tensor_groups:[TensorGroup];

  // Metadata of all output tensor groups used in this subgraph.
  //
  // <codegen usage>:
  // Bundles the corresponding elements of the underlying output tensors
  // together into a class, and converts those individual tensors into a list of
  // the class objects.
  // Added in: 1.2.0
  output_tensor_groups:[TensorGroup];
}
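
// A sketch of assembling a subgraph entry from the earlier tensor examples
// (input_meta and output_meta are the hypothetical TensorMetadataT objects
// built above):
//
//   from tflite_support import metadata_schema_py_generated as _metadata_fb
//
//   subgraph = _metadata_fb.SubGraphMetadataT()
//   subgraph.inputTensorMetadata = [input_meta]
//   subgraph.outputTensorMetadata = [output_meta]
//
//   model_meta = _metadata_fb.ModelMetadataT()
//   model_meta.name = "MobileNetV1 image classifier"  # hypothetical name
//   model_meta.subgraphMetadata = [subgraph]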

table ModelMetadata {
  // Name of the model.
  //
  // <Codegen usage>:
  // The name of the model in the generated model interface.
  name:string;

  // Model description in schema.
  description:string;

  // Version of the model specified by model creators.
  version:string;

  // Note that the minimum required TFLite runtime version that the model is
  // compatible with has already been added as a metadata entry in the tflite
  // schema. We'll decide later if we want to move it here, and keep it with
  // other metadata entries.

  // Metadata of all the subgraphs of the model. The 0th is assumed to be the
  // main subgraph.
  //
  // <Codegen usage>:
  // Determines how to process the inputs and outputs.
  subgraph_metadata:[SubGraphMetadata];

  // The person who created this model.
  author:string;

  // Licenses that may apply to this model.
  license:string;

  // A list of associated files of this model.
  associated_files:[AssociatedFile];

  // The minimum metadata parser version that can fully understand the fields in
  // the metadata flatbuffer. The version is effectively the largest version
  // number among the versions of all the fields populated and the smallest
  // compatible version indicated by the file identifier.
  //
  // This field is automatically populated by the MetadataPopulator when
  // the metadata is populated into a TFLite model.
  min_parser_version:string;
}
// LINT.ThenChange(//tensorflow_lite_support/\
//     metadata/cc/metadata_version.cc)

root_type ModelMetadata;
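
// Finally, a sketch of packing everything into a model file with the Python
// `tflite_support` package ("model.tflite" and "labels.txt" are hypothetical
// paths, and model_meta is the ModelMetadataT built in the subgraph example):
//
//   from tflite_support import flatbuffers
//   from tflite_support import metadata as _metadata
//
//   b = flatbuffers.Builder(0)
//   b.Finish(model_meta.Pack(b),
//            _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
//
//   populator = _metadata.MetadataPopulator.with_model_file("model.tflite")
//   populator.load_metadata_buffer(b.Output())
//   populator.load_associated_files(["labels.txt"])
//   populator.populate()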