/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

#include <cstddef>
#include <cstdint>
#include <memory>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

namespace {

// Utility class for safely allocating POD data. This is useful for avoiding
// leaks in cases where op params are allocated but fail to propagate to the
// parsed op data (e.g., when model parameters are invalid).
class SafeBuiltinDataAllocator {
 public:
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}

    void operator()(void* data) { allocator_->Deallocate(data); }

   private:
    BuiltinDataAllocator* allocator_;
  };

  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;

  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}

  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }

 private:
  BuiltinDataAllocator* allocator_;
};
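
// Example usage (an illustrative sketch, not part of this file's API;
// `MallocDataAllocator` stands in for any concrete BuiltinDataAllocator
// implementation):
//
//   MallocDataAllocator allocator;
//   SafeBuiltinDataAllocator safe_allocator(&allocator);
//   auto params = safe_allocator.Allocate<TfLiteAddParams>();
//   if (params == nullptr) return kTfLiteError;
//   params->activation = kTfLiteActNone;
//   // On any early return, the unique_ptr deallocates through the
//   // allocator; on success, ownership passes to the caller.
//   *builtin_data = params.release();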

// All the Parse functions take pointer parameters; this function holds the
// common DCHECKs that catch any of them being nullptr.
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
                             BuiltinDataAllocator* allocator,
                             void** builtin_data) {
  TFLITE_DCHECK(op != nullptr);
  TFLITE_DCHECK(error_reporter != nullptr);
  TFLITE_DCHECK(allocator != nullptr);
  TFLITE_DCHECK(builtin_data != nullptr);
}

// Copies the contents of the flatbuffer int vector `flat_vector` into the
// int array `buffer`. `flat_vector` and `buffer` represent the same
// configuration data for a given operation.
TfLiteStatus FlatBufferIntVectorToArray(
    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
  if (!flat_vector) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Input array not provided for operation '%s'.\n",
                         op_name);
    return kTfLiteError;
  } else {
    size_t num_dimensions = flat_vector->size();
    if (num_dimensions > max_size_of_buffer / sizeof(int)) {
      TF_LITE_REPORT_ERROR(
          error_reporter,
          "Found too many dimensions in the input array of operation '%s'.\n",
          op_name);
      return kTfLiteError;
    } else {
      for (size_t i = 0; i < num_dimensions; ++i) {
        buffer[i] = flat_vector->Get(i);
      }
    }
  }
  return kTfLiteOk;
}
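
// Example call (an illustrative sketch; the fixed-size `dims` buffer and the
// `squeeze_dims` accessor mirror how op parsers typically use this helper):
//
//   int dims[8];
//   TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
//       sizeof(dims), schema_params->squeeze_dims(), dims, error_reporter,
//       "squeeze"));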

// Converts the flatbuffer activation to what is used at runtime.
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
  switch (activation) {
    case ActivationFunctionType_NONE:
      return kTfLiteActNone;
    case ActivationFunctionType_RELU:
      return kTfLiteActRelu;
    case ActivationFunctionType_RELU_N1_TO_1:
      return kTfLiteActReluN1To1;
    case ActivationFunctionType_RELU6:
      return kTfLiteActRelu6;
    case ActivationFunctionType_TANH:
      return kTfLiteActTanh;
    case ActivationFunctionType_SIGN_BIT:
      return kTfLiteActSignBit;
  }
  return kTfLiteActNone;
}

// Converts the flatbuffer padding enum to what is used at runtime.
TfLitePadding ConvertPadding(Padding padding) {
  switch (padding) {
    case Padding_SAME:
      return kTfLitePaddingSame;
    case Padding_VALID:
      return kTfLitePaddingValid;
  }
  return kTfLitePaddingUnknown;
}

// Converts the flatbuffer mirror padding enum to what is used at runtime.
TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
  switch (padding) {
    case MirrorPadMode_REFLECT:
      return kTfLiteMirrorPaddingReflect;
    case MirrorPadMode_SYMMETRIC:
      return kTfLiteMirrorPaddingSymmetric;
  }
  return kTfLiteMirrorPaddingUnknown;
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  auto parseLSHProjectionType = [](LSHProjectionType type) {
    switch (type) {
      case LSHProjectionType_SPARSE:
        return kTfLiteLshProjectionSparse;
      case LSHProjectionType_DENSE:
        return kTfLiteLshProjectionDense;
      default:
        return kTfLiteLshProjectionUnknown;
    }
  };
  auto parseCombinerType = [](CombinerType type) {
    switch (type) {
      case CombinerType_MEAN:
        return kTfLiteCombinerTypeMean;
      case CombinerType_SQRTN:
        return kTfLiteCombinerTypeSqrtn;
      case CombinerType_SUM:
      default:
        return kTfLiteCombinerTypeSum;
    }
  };

  SafeBuiltinDataAllocator safe_allocator(allocator);
  *builtin_data = nullptr;
  switch (op_type) {
    case BuiltinOperator_ABS: {
      return ParseAbs(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ADD: {
      return ParseAdd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ADD_N: {
      return ParseAddN(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ARG_MAX: {
      return ParseArgMax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ARG_MIN: {
      return ParseArgMin(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ASSIGN_VARIABLE: {
      return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_AVERAGE_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BATCH_MATMUL: {
      return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BATCH_TO_SPACE_ND: {
      return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BROADCAST_ARGS: {
      return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_BROADCAST_TO: {
      return ParseBroadcastTo(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CALL_ONCE: {
      return ParseCallOnce(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CEIL: {
      return ParseCeil(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CONCATENATION: {
      return ParseConcatenation(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CONV_2D: {
      return ParseConv2D(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CUMSUM: {
      return ParseCumsum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEPTH_TO_SPACE: {
      return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DEQUANTIZE: {
      return ParseDequantize(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_DIV: {
      return ParseDiv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ELU: {
      return ParseElu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_EXP: {
      return ParseExp(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_EXPAND_DIMS: {
      return ParseExpandDims(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FILL: {
      return ParseFill(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR: {
      return ParseFloor(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR_DIV: {
      return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FLOOR_MOD: {
      return ParseFloorMod(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_FULLY_CONNECTED: {
      return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GATHER_ND: {
      return ParseGatherNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GREATER: {
      return ParseGreater(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_GREATER_EQUAL: {
      return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_HARD_SWISH: {
      return ParseHardSwish(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_L2_NORMALIZATION: {
      return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_L2_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LEAKY_RELU: {
      return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LESS: {
      return ParseLess(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LESS_EQUAL: {
      return ParseLessEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOG: {
      return ParseLog(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_AND: {
      return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_NOT: {
      return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGICAL_OR: {
      return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOGISTIC: {
      return ParseLogistic(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LOG_SOFTMAX: {
      return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_LSTM: {
      return ParseLSTM(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MAXIMUM: {
      return ParseMaximum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MAX_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MIRROR_PAD: {
      return ParseMirrorPad(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MEAN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MINIMUM: {
      return ParseMinimum(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_MUL: {
      return ParseMul(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_NEG: {
      return ParseNeg(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_NOT_EQUAL: {
      return ParseNotEqual(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PACK: {
      return ParsePack(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PAD: {
      return ParsePad(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PADV2: {
      return ParsePadV2(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_POW: {
      return ParsePow(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_PRELU: {
      return ParsePrelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_QUANTIZE: {
      return ParseQuantize(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_READ_VARIABLE: {
      return ParseReadVariable(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_ANY: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_ALL: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_MAX: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_MIN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_REDUCE_PROD: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RELU: {
      return ParseRelu(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RELU6: {
      return ParseRelu6(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESHAPE: {
      return ParseReshape(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESIZE_BILINEAR: {
      return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      return ParseResizeNearestNeighbor(op, error_reporter, allocator,
                                        builtin_data);
    }

    case BuiltinOperator_ROUND: {
      return ParseRound(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_RSQRT: {
      return ParseRsqrt(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SELECT_V2: {
      return ParseSelectV2(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SHAPE: {
      return ParseShape(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SIN: {
      return ParseSin(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SOFTMAX: {
      return ParseSoftmax(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPACE_TO_BATCH_ND: {
      return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPACE_TO_DEPTH: {
      return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPLIT: {
      return ParseSplit(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SPLIT_V: {
      return ParseSplitV(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQRT: {
      return ParseSqrt(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQUARE: {
      return ParseSquare(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SQUARED_DIFFERENCE: {
      return ParseSquaredDifference(op, error_reporter, allocator,
                                    builtin_data);
    }

    case BuiltinOperator_SQUEEZE: {
      return ParseSqueeze(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_STRIDED_SLICE: {
      return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SUB: {
      return ParseSub(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SUM: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_SVDF: {
      return ParseSvdf(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_TANH: {
      return ParseTanh(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_TRANSPOSE_CONV: {
      return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_UNPACK: {
      return ParseUnpack(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_VAR_HANDLE: {
      return ParseVarHandle(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_ZEROS_LIKE: {
      return ParseZerosLike(op, error_reporter, allocator, builtin_data);
    }

    case BuiltinOperator_CAST: {
      return ParseCast(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LSH_PROJECTION: {
      auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* lshParams =
              op->builtin_options_as_LSHProjectionOptions()) {
        params->type = parseLSHProjectionType(lshParams->type());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sequence_rnn_params =
              op->builtin_options_as_SequenceRNNOptions()) {
        params->activation =
            ConvertActivation(sequence_rnn_params->fused_activation_function());
        params->time_major = sequence_rnn_params->time_major();
        params->asymmetric_quantize_inputs =
            sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_sequence_rnn_params =
              op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
        params->activation = ConvertActivation(
            bidi_sequence_rnn_params->fused_activation_function());
        params->time_major = bidi_sequence_rnn_params->time_major();
        params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
        params->asymmetric_quantize_inputs =
            bidi_sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RNN: {
      auto params = safe_allocator.Allocate<TfLiteRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
        params->activation =
            ConvertActivation(rnn_params->fused_activation_function());
        params->asymmetric_quantize_inputs =
            rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
      auto params =
          safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* embedding_params =
              op->builtin_options_as_EmbeddingLookupSparseOptions()) {
        params->combiner = parseCombinerType(embedding_params->combiner());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }

    case BuiltinOperator_HASHTABLE_LOOKUP:
      // no-op.
      return kTfLiteOk;

    case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
      auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_LocalResponseNormalizationOptions()) {
        params->radius = schema_params->radius();
        params->bias = schema_params->bias();
        params->alpha = schema_params->alpha();
        params->beta = schema_params->beta();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator,
                                             builtin_data);
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_lstm_params =
              op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
        params->activation =
            ConvertActivation(bidi_lstm_params->fused_activation_function());
        params->cell_clip = bidi_lstm_params->cell_clip();
        params->proj_clip = bidi_lstm_params->proj_clip();
        params->merge_outputs = bidi_lstm_params->merge_outputs();
        params->time_major = bidi_lstm_params->time_major();
        params->asymmetric_quantize_inputs =
            bidi_lstm_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_SKIP_GRAM: {
      auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* skip_gram_params =
              op->builtin_options_as_SkipGramOptions()) {
        params->ngram_size = skip_gram_params->ngram_size();
        params->max_skip_size = skip_gram_params->max_skip_size();
        params->include_all_ngrams = skip_gram_params->include_all_ngrams();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }

    case BuiltinOperator_GATHER: {
      return ParseGather(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPARSE_TO_DENSE: {
      auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sparse_to_dense_params =
              op->builtin_options_as_SparseToDenseOptions()) {
        params->validate_indices = sparse_to_dense_params->validate_indices();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_DELEGATE: {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "DELEGATE op shouldn't exist in model.");
      return kTfLiteError;
    }
    case BuiltinOperator_FAKE_QUANT: {
      auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_FakeQuantOptions()) {
        params->min = schema_params->min();
        params->max = schema_params->max();
        params->num_bits = schema_params->num_bits();
        params->narrow_range = schema_params->narrow_range();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_ONE_HOT: {
      auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
        params->axis = schema_params->axis();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIQUE: {
      auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      const auto* unique_params = op->builtin_options_as_UniqueOptions();
      if (unique_params != nullptr) {
        params->index_out_type =
            unique_params->idx_out_type() == tflite::TensorType_INT64
                ? TfLiteType::kTfLiteInt64
                : TfLiteType::kTfLiteInt32;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_REVERSE_SEQUENCE: {
      auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* reverse_seq_params =
              op->builtin_options_as_ReverseSequenceOptions()) {
        params->seq_dim = reverse_seq_params->seq_dim();
        params->batch_dim = reverse_seq_params->batch_dim();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_IF: {
      auto params = safe_allocator.Allocate<TfLiteIfParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* if_params = op->builtin_options_as_IfOptions()) {
        params->then_subgraph_index = if_params->then_subgraph_index();
        params->else_subgraph_index = if_params->else_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_WHILE: {
      auto params = safe_allocator.Allocate<TfLiteWhileParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
        params->cond_subgraph_index = while_params->cond_subgraph_index();
        params->body_subgraph_index = while_params->body_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CONV_3D:
    case BuiltinOperator_CONV_3D_TRANSPOSE: {
      auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
        params->padding = ConvertPadding(conv3d_params->padding());
        params->activation =
            ConvertActivation(conv3d_params->fused_activation_function());
        params->stride_depth = conv3d_params->stride_d();
        params->stride_height = conv3d_params->stride_h();
        params->stride_width = conv3d_params->stride_w();
        params->dilation_depth_factor = conv3d_params->dilation_d_factor();
        params->dilation_height_factor = conv3d_params->dilation_h_factor();
        params->dilation_width_factor = conv3d_params->dilation_w_factor();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_HASHTABLE: {
      auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* hashtable_params =
              op->builtin_options_as_HashtableOptions()) {
        params->table_id = hashtable_params->table_id();
        TF_LITE_ENSURE_STATUS(ConvertTensorType(
            hashtable_params->key_dtype(), &params->key_dtype, error_reporter));
        TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
                                                &params->value_dtype,
                                                error_reporter));
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_MULTINOMIAL: {
      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* multinomial_params =
              op->builtin_options_as_RandomOptions()) {
        params->seed = multinomial_params->seed();
        params->seed2 = multinomial_params->seed2();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RANDOM_STANDARD_NORMAL: {
      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* random_std_normal_params =
              op->builtin_options_as_RandomOptions()) {
        params->seed = random_std_normal_params->seed();
        params->seed2 = random_std_normal_params->seed2();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BUCKETIZE: {
      auto params = safe_allocator.Allocate<TfLiteBucketizeParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bucketize_params =
              op->builtin_options_as_BucketizeOptions()) {
        const flatbuffers::Vector<float>* boundaries =
            bucketize_params->boundaries();
        if (boundaries == nullptr) {
          TF_LITE_REPORT_ERROR(
              error_reporter,
              "boundaries array not provided for operation 'bucketize'.\n");
          return kTfLiteError;
        }
        params->num_boundaries = boundaries->size();
        if (boundaries->data() == nullptr) {
          TF_LITE_REPORT_ERROR(error_reporter,
                               "boundaries.data() returned nullptr for "
                               "operation 'bucketize'.\n");
          return kTfLiteError;
        }
        params->boundaries = boundaries->data();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RANDOM_UNIFORM: {
      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* random_uniform_params =
              op->builtin_options_as_RandomOptions()) {
        params->seed = random_uniform_params->seed();
        params->seed2 = random_uniform_params->seed2();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_GELU: {
      auto params = safe_allocator.Allocate<TfLiteGeluParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) {
        params->approximate = gelu_params->approximate();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    // Below are the ops with no builtin_data structure.
    // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
    // ok for now, since there is no call implementation either.
    case BuiltinOperator_CALL:
    case BuiltinOperator_COMPLEX_ABS:
    case BuiltinOperator_CONCAT_EMBEDDINGS:
    case BuiltinOperator_COS:
    case BuiltinOperator_CUSTOM:
    case BuiltinOperator_DENSIFY:
    case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
    case BuiltinOperator_EMBEDDING_LOOKUP:
    case BuiltinOperator_EQUAL:
    case BuiltinOperator_HASHTABLE_FIND:
    case BuiltinOperator_HASHTABLE_IMPORT:
    case BuiltinOperator_HASHTABLE_SIZE:
    case BuiltinOperator_IMAG:
    case BuiltinOperator_MATRIX_DIAG:
    case BuiltinOperator_MATRIX_SET_DIAG:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
    case BuiltinOperator_RELU_N1_TO_1:
    case BuiltinOperator_RELU_0_TO_1:
    case BuiltinOperator_SCATTER_ND:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_SLICE:
    case BuiltinOperator_TILE:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_TRANSPOSE:
    case BuiltinOperator_RANGE:
    case BuiltinOperator_RANK:
    case BuiltinOperator_REAL:
    case BuiltinOperator_RFFT2D:
    case BuiltinOperator_SEGMENT_SUM:
    case BuiltinOperator_REVERSE_V2:
    case BuiltinOperator_UNSORTED_SEGMENT_MAX:
    case BuiltinOperator_UNSORTED_SEGMENT_MIN:
    case BuiltinOperator_UNSORTED_SEGMENT_PROD:
    case BuiltinOperator_UNSORTED_SEGMENT_SUM:
    case BuiltinOperator_ATAN2:
    case BuiltinOperator_SIGN:
    case BuiltinOperator_WHERE:
      return kTfLiteOk;
    case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
      return kTfLiteError;
  }
  return kTfLiteError;
}  // NOLINT[readability/fn_size]
#endif  // !defined(TF_LITE_STATIC_MEMORY)
}  // namespace

TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter) {
  switch (tensor_type) {
    case TensorType_FLOAT16:
      *type = kTfLiteFloat16;
      return kTfLiteOk;
    case TensorType_FLOAT32:
      *type = kTfLiteFloat32;
      return kTfLiteOk;
    case TensorType_FLOAT64:
      *type = kTfLiteFloat64;
      return kTfLiteOk;
    case TensorType_INT16:
      *type = kTfLiteInt16;
      return kTfLiteOk;
    case TensorType_UINT16:
      *type = kTfLiteUInt16;
      return kTfLiteOk;
    case TensorType_INT32:
      *type = kTfLiteInt32;
      return kTfLiteOk;
    case TensorType_UINT32:
      *type = kTfLiteUInt32;
      return kTfLiteOk;
    case TensorType_UINT8:
      *type = kTfLiteUInt8;
      return kTfLiteOk;
    case TensorType_INT8:
      *type = kTfLiteInt8;
      return kTfLiteOk;
    case TensorType_INT64:
      *type = kTfLiteInt64;
      return kTfLiteOk;
    case TensorType_UINT64:
      *type = kTfLiteUInt64;
      return kTfLiteOk;
    case TensorType_STRING:
      *type = kTfLiteString;
      return kTfLiteOk;
    case TensorType_BOOL:
      *type = kTfLiteBool;
      return kTfLiteOk;
    case TensorType_COMPLEX64:
      *type = kTfLiteComplex64;
      return kTfLiteOk;
    case TensorType_COMPLEX128:
      *type = kTfLiteComplex128;
      return kTfLiteOk;
    case TensorType_RESOURCE:
      *type = kTfLiteResource;
      return kTfLiteOk;
    case TensorType_VARIANT:
      *type = kTfLiteVariant;
      return kTfLiteOk;
    default:
      *type = kTfLiteNoType;
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Unsupported data type %d in tensor\n", tensor_type);
      return kTfLiteError;
  }
}
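
// Example (illustrative):
//
//   TfLiteType type = kTfLiteNoType;
//   TF_LITE_ENSURE_STATUS(
//       ConvertTensorType(TensorType_FLOAT32, &type, error_reporter));
//   // type is now kTfLiteFloat32.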

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
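
// A sketch of how TFLite Micro's selective registration can reference
// ParseAbs directly, so the linker keeps only the parsers a model actually
// uses (the resolver method and registration names are assumptions; they
// vary across micro versions):
//
//   op_resolver.AddBuiltin(BuiltinOperator_ABS, Register_ABS(), ParseAbs);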

TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteAddParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const AddOptions* schema_params = op->builtin_options_as_AddOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->pot_scale_int16 = schema_params->pot_scale_int16();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
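
// Illustrative caller sketch (hypothetical; `allocator` is any concrete
// BuiltinDataAllocator and `op` comes from the model's operator table):
//
//   void* builtin_data = nullptr;
//   TF_LITE_ENSURE_STATUS(
//       ParseAdd(op, error_reporter, &allocator, &builtin_data));
//   auto* add_params = reinterpret_cast<TfLiteAddParams*>(builtin_data);
//   // add_params->activation and add_params->pot_scale_int16 now reflect
//   // the AddOptions table (they are left as allocated when it is absent).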

TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  return kTfLiteOk;
}

TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteArgMaxParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteArgMaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();

  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteArgMinParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteArgMinParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();

  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
                              BuiltinDataAllocator* allocator,
                              void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
    params->adj_x = bmm_params->adj_x();
    params->adj_y = bmm_params->adj_y();
    params->asymmetric_quantize_inputs =
        bmm_params->asymmetric_quantize_inputs();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*,
                                BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*,
                              BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
                           BuiltinDataAllocator* allocator,
                           void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteCallOnceParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteCallOnceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const CallOnceOptions* schema_params =
      op->builtin_options_as_CallOnceOptions();

  if (schema_params != nullptr) {
    params->init_subgraph_index = schema_params->init_subgraph_index();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCastParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->in_data_type(), &params->in_data_type, error_reporter));
    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
                                            &params->out_data_type,
                                            error_reporter));
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseConcatenation(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteConcatenationParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteConcatenationParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ConcatenationOptions* schema_params =
      op->builtin_options_as_ConcatenationOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();

  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());

    params->dilation_width_factor = schema_params->dilation_w_factor();
    params->dilation_height_factor = schema_params->dilation_h_factor();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
    params->exclusive = cumsum_params->exclusive();
    params->reverse = cumsum_params->reverse();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
1242 
ParseDepthToSpace(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1243 TfLiteStatus ParseDepthToSpace(const Operator* op,
1244                                ErrorReporter* error_reporter,
1245                                BuiltinDataAllocator* allocator,
1246                                void** builtin_data) {
1247   CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1248 
1249   SafeBuiltinDataAllocator safe_allocator(allocator);
1250   std::unique_ptr<TfLiteDepthToSpaceParams,
1251                   SafeBuiltinDataAllocator::BuiltinDataDeleter>
1252       params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
1253   TF_LITE_ENSURE(error_reporter, params != nullptr);
1254 
1255   const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
1256   if (schema_params != nullptr) {
1257     params->block_size = schema_params->block_size();
1258   } else {
1259     // TODO(b/157480169): We should either return kTfLiteError or fill in some
1260     // reasonable defaults in the params struct. We are not doing so until we
1261     // better undertand the ramifications of changing the legacy behavior.
1262   }
1263 
1264   *builtin_data = params.release();
1265   return kTfLiteOk;
1266 }
1267 
ParseDepthwiseConv2D(const Operator * op,ErrorReporter * error_reporter,BuiltinDataAllocator * allocator,void ** builtin_data)1268 TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
1269                                   ErrorReporter* error_reporter,
1270                                   BuiltinDataAllocator* allocator,
1271                                   void** builtin_data) {
1272   CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
1273 
1274   SafeBuiltinDataAllocator safe_allocator(allocator);
1275 
1276   std::unique_ptr<TfLiteDepthwiseConvParams,
1277                   SafeBuiltinDataAllocator::BuiltinDataDeleter>
1278       params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
1279   TF_LITE_ENSURE(error_reporter, params != nullptr);
1280 
1281   const DepthwiseConv2DOptions* schema_params =
1282       op->builtin_options_as_DepthwiseConv2DOptions();
1283 
1284   if (schema_params != nullptr) {
1285     params->padding = ConvertPadding(schema_params->padding());
1286     params->stride_width = schema_params->stride_w();
1287     params->stride_height = schema_params->stride_h();
1288     params->depth_multiplier = schema_params->depth_multiplier();
1289     params->activation =
1290         ConvertActivation(schema_params->fused_activation_function());
1291 
1292     params->dilation_width_factor = schema_params->dilation_w_factor();
1293     params->dilation_height_factor = schema_params->dilation_h_factor();
1294   } else {
1295     // TODO(b/157480169): We should either return kTfLiteError or fill in some
1296     // reasonable defaults in the params struct. We are not doing so until we
1297     // better undertand the ramifications of changing the legacy behavior.
1298   }
1299 
1300   *builtin_data = params.release();
1301   return kTfLiteOk;
1302 }
1303 
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteDivParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

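// Parses FullyConnectedOptions into TfLiteFullyConnectedParams. Unlike most
// parsers in this file, it returns kTfLiteError when the weights format is
// one it does not recognize.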
TfLiteStatus ParseFullyConnected(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteFullyConnectedParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const FullyConnectedOptions* schema_params =
      op->builtin_options_as_FullyConnectedOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->keep_num_dims = schema_params->keep_num_dims();
    params->asymmetric_quantize_inputs =
        schema_params->asymmetric_quantize_inputs();

    switch (schema_params->weights_format()) {
      case FullyConnectedOptionsWeightsFormat_DEFAULT:
        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
        break;
      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
        params->weights_format =
            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
        break;
      default:
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Unhandled fully-connected weights format.");
        return kTfLiteError;
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
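// ParseGather also pre-fills axis = 0 and batch_dims = 0 so that models
// without GatherOptions still get deterministic params.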
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteGatherParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  params->axis = 0;
  params->batch_dims = 0;
  if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
    params->axis = gather_params->axis();
    params->batch_dims = gather_params->batch_dims();
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
                               BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

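// Parses IfOptions into TfLiteIfParams: the indices of the subgraphs to run
// for the then- and else-branches.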
TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
                     BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteIfParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const IfOptions* schema_params = op->builtin_options_as_IfOptions();

  if (schema_params != nullptr) {
    params->then_subgraph_index = schema_params->then_subgraph_index();
    params->else_subgraph_index = schema_params->else_subgraph_index();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseL2Normalization(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteL2NormParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteL2NormParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

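// Parses LeakyReluOptions into TfLiteLeakyReluParams (the slope `alpha`
// applied to negative inputs).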
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
                            BuiltinDataAllocator* allocator,
                            void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* leaky_relu_params =
          op->builtin_options_as_LeakyReluOptions()) {
    params->alpha = leaky_relu_params->alpha();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

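// Parses LSTMOptions into TfLiteLSTMParams. This parser is stricter than most
// in this file: both a missing options table and an unknown kernel type are
// reported as kTfLiteError.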
TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
    params->activation =
        ConvertActivation(lstm_params->fused_activation_function());
    params->cell_clip = lstm_params->cell_clip();
    params->proj_clip = lstm_params->proj_clip();
    switch (lstm_params->kernel_type()) {
      case LSTMKernelType_FULL:
        params->kernel_type = kTfLiteLSTMFullKernel;
        break;
      case LSTMKernelType_BASIC:
        params->kernel_type = kTfLiteLSTMBasicKernel;
        break;
      default:
        TF_LITE_REPORT_ERROR(error_reporter, "Unhandled LSTM kernel type: %d",
                             lstm_params->kernel_type());
        return kTfLiteError;
    }
    params->asymmetric_quantize_inputs =
        lstm_params->asymmetric_quantize_inputs();
  } else {
    TF_LITE_REPORT_ERROR(error_reporter, "No valid LSTM builtin options exist");
    return kTfLiteError;
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

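// Parses MirrorPadOptions into TfLiteMirrorPaddingParams (the REFLECT vs.
// SYMMETRIC padding mode).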
TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
                            BuiltinDataAllocator* allocator,
                            void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteMirrorPaddingParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const MirrorPadOptions* schema_params =
      op->builtin_options_as_MirrorPadOptions();

  if (schema_params != nullptr) {
    params->mode = ConvertMirrorPadding(schema_params->mode());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const MulOptions* schema_params = op->builtin_options_as_MulOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

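// Parses PackOptions into TfLitePackParams: the number of values to pack and
// the axis to pack them along.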
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLitePackParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLitePackParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const PackOptions* schema_params = op->builtin_options_as_PackOptions();

  if (schema_params != nullptr) {
    params->values_count = schema_params->values_count();
    params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

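// Shared parser for the pooling ops that carry Pool2DOptions: padding,
// strides, filter size, and the fused activation.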
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLitePoolParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLitePoolParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();

  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->filter_width = schema_params->filter_width();
    params->filter_height = schema_params->filter_height();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,
                               BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteReducerParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteReducerParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();

  if (schema_params != nullptr) {
    params->keep_dims = schema_params->keep_dims();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

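// Parses ReshapeOptions into TfLiteReshapeParams. The new shape is copied
// with FlatBufferIntVectorToArray, bounded by sizeof(params->shape); any
// failure there is propagated through TF_LITE_ENSURE_STATUS.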
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteReshapeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteReshapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();

  if (schema_params != nullptr) {
    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
    if (new_shape != nullptr) {
      TF_LITE_ENSURE_STATUS(
          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
                                     params->shape, error_reporter, "reshape"));
      params->num_dimensions = new_shape->size();
    } else {
      // TODO(b/157480169) TODO(b/147203660): We should either return
      // kTfLiteError or fill in some reasonable defaults in the params struct.
      // We are not doing so until we better understand the ramifications of
      // changing the legacy behavior.
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

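// Parses ResizeBilinearOptions into TfLiteResizeBilinearParams. Both resize
// parsers below fall back to explicit defaults (align_corners = false,
// half_pixel_centers = false) when the options table is absent, rather than
// leaving the fields uninitialized.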
TfLiteStatus ParseResizeBilinear(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteResizeBilinearParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ResizeBilinearOptions* schema_params =
      op->builtin_options_as_ResizeBilinearOptions();

  if (schema_params != nullptr) {
    params->align_corners = schema_params->align_corners();
    params->half_pixel_centers = schema_params->half_pixel_centers();
  } else {
    params->align_corners = false;
    params->half_pixel_centers = false;
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
                                        ErrorReporter* error_reporter,
                                        BuiltinDataAllocator* allocator,
                                        void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteResizeNearestNeighborParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ResizeNearestNeighborOptions* schema_params =
      op->builtin_options_as_ResizeNearestNeighborOptions();

  if (schema_params != nullptr) {
    params->align_corners = schema_params->align_corners();
    params->half_pixel_centers = schema_params->half_pixel_centers();
  } else {
    params->align_corners = false;
    params->half_pixel_centers = false;
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteShapeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteShapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();

  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
                                            &params->out_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSoftmaxParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();

  if (schema_params != nullptr) {
    params->beta = schema_params->beta();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseSpaceToDepth(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSpaceToDepthParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
  if (schema_params != nullptr) {
    params->block_size = schema_params->block_size();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSplitParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSplitParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();

  if (schema_params != nullptr) {
    params->num_splits = schema_params->num_splits();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteSplitVParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSplitVParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();

  if (schema_params != nullptr) {
    params->num_splits = schema_params->num_splits();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

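// Parses UnidirectionalSequenceLSTMOptions into
// TfLiteUnidirectionalSequenceLSTMParams: activation, cell/projection clips,
// time_major layout, and asymmetric quantization of inputs.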
TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
                                             ErrorReporter* error_reporter,
                                             BuiltinDataAllocator* allocator,
                                             void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params =
      safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* seq_lstm_params =
          op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
    params->activation =
        ConvertActivation(seq_lstm_params->fused_activation_function());
    params->cell_clip = seq_lstm_params->cell_clip();
    params->proj_clip = seq_lstm_params->proj_clip();
    params->time_major = seq_lstm_params->time_major();
    params->asymmetric_quantize_inputs =
        seq_lstm_params->asymmetric_quantize_inputs();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

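// Parses SqueezeOptions into TfLiteSqueezeParams. An options table without
// squeeze_dims is valid and yields num_squeeze_dims = 0.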
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteSqueezeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSqueezeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();

  if (schema_params != nullptr) {
    const auto* squeeze_dims = schema_params->squeeze_dims();
    if (squeeze_dims != nullptr) {
      TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
          sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
          error_reporter, "squeeze"));
      params->num_squeeze_dims = squeeze_dims->size();
    } else {
      params->num_squeeze_dims = 0;
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                         void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*,
                                    BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

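// Parses StridedSliceOptions into TfLiteStridedSliceParams: the begin/end,
// ellipsis, new-axis, and shrink-axis bit masks.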
TfLiteStatus ParseStridedSlice(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteStridedSliceParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const StridedSliceOptions* schema_params =
      op->builtin_options_as_StridedSliceOptions();

  if (schema_params != nullptr) {
    params->begin_mask = schema_params->begin_mask();
    params->end_mask = schema_params->end_mask();
    params->ellipsis_mask = schema_params->ellipsis_mask();
    params->new_axis_mask = schema_params->new_axis_mask();
    params->shrink_axis_mask = schema_params->shrink_axis_mask();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSubParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SubOptions* schema_params = op->builtin_options_as_SubOptions();

  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->pot_scale_int16 = schema_params->pot_scale_int16();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

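// Parses SVDFOptions into TfLiteSVDFParams: rank, fused activation, and
// asymmetric quantization of inputs.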
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteSVDFParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteSVDFParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
  if (schema_params != nullptr) {
    params->rank = schema_params->rank();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->asymmetric_quantize_inputs =
        schema_params->asymmetric_quantize_inputs();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

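// Parses TransposeConvOptions into TfLiteTransposeConvParams: padding and
// strides.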
TfLiteStatus ParseTransposeConv(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteTransposeConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const TransposeConvOptions* transpose_conv_params =
      op->builtin_options_as_TransposeConvOptions();
  if (transpose_conv_params != nullptr) {
    params->padding = ConvertPadding(transpose_conv_params->padding());
    params->stride_width = transpose_conv_params->stride_w();
    params->stride_height = transpose_conv_params->stride_h();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteUnpackParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteUnpackParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();

  if (schema_params != nullptr) {
    params->num = schema_params->num();
    params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

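// For reference (a fact about UNPACK semantics, not specific to this file):
// `num` is the number of output tensors and `axis` is the dimension being
// unpacked. For example, unpacking a [3, 4] tensor along axis 0 with num = 3
// yields three tensors of shape [4].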
TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
                            BuiltinDataAllocator* allocator,
                            void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteVarHandleParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteVarHandleParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const VarHandleOptions* schema_params =
      op->builtin_options_as_VarHandleOptions();

  if (schema_params != nullptr) {
    if (schema_params->container()) {
      params->container = schema_params->container()->c_str();
    }
    if (schema_params->shared_name()) {
      params->shared_name = schema_params->shared_name()->c_str();
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

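// Lifetime note: `c_str()` returns a pointer into the flatbuffer's string
// storage, so `container` and `shared_name` stay valid only while the
// serialized model buffer is alive. A hypothetical caller sketch (not part of
// this file):
//
//   auto* p = static_cast<TfLiteVarHandleParams*>(builtin_data);
//   if (p->container != nullptr) {
//     // Safe only while the serialized model outlives `p`.
//     printf("container: %s\n", p->container);
//   }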
TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteWhileParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteWhileParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const WhileOptions* schema_params = op->builtin_options_as_WhileOptions();

  if (schema_params != nullptr) {
    params->cond_subgraph_index = schema_params->cond_subgraph_index();
    params->body_subgraph_index = schema_params->body_subgraph_index();
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}

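// For reference (a fact about WHILE semantics): the two indices refer to
// subgraphs of the enclosing Model; the condition subgraph is evaluated
// before each iteration of the body subgraph. An illustrative lookup sketch
// (not part of this file):
//
//   const SubGraph* cond_subgraph =
//       model->subgraphs()->Get(params->cond_subgraph_index);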
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
// Micro not have the ParseOpData function at all. This would require splitting
// the current file into two separate files, one of which defines the
// ParseOpData function and the other that defines the operator specific parse
// functions (e.g. ParseAdd).
//
// Such a split was attempted but was not worth the effort at the time because
// of the following reasons:
//  * We could either duplicate the functions and the SafeBuiltinDataAllocator
//    class in the anonymous namespace of this file, or attempt to make a common
//    library with these helper functions and class.
//  * Making a common library with a separate build target was not feasible, as
//    it introduced circular dependencies due to the ErrorReporter, and a common
//    .cc and .h within the same api build target also caused circular
//    dependencies due to the BuiltinDataAllocator class.
//  * If all the builtin operators were to have their own parse functions, or we
//    were OK with some amount of code duplication, then this split of the .cc
//    files would be a lot more feasible.
#ifdef TF_LITE_STATIC_MEMORY
  TF_LITE_REPORT_ERROR(
      error_reporter,
      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
      "specific parse functions (e.g. ParseAdd etc.).\n");
  return kTfLiteError;
#else
  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
                           builtin_data);
#endif
}

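// Illustrative dispatch sketch (assumption, not part of this file): full
// TF Lite callers can route every builtin through this generic entry point,
// while micro builds (TF_LITE_STATIC_MEMORY) must call the per-op parsers:
//
//   void* builtin_data = nullptr;
//   #ifdef TF_LITE_STATIC_MEMORY
//   TF_LITE_ENSURE_STATUS(
//       ParseTanh(op, error_reporter, allocator, &builtin_data));
//   #else
//   TF_LITE_ENSURE_STATUS(ParseOpData(op, BuiltinOperator_TANH,
//                                     error_reporter, allocator,
//                                     &builtin_data));
//   #endif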
}  // namespace tflite