/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_

#include <stdint.h>

#include <limits>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#endif  // TF_LITE_STATIC_MEMORY

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"

namespace tflite {

// A fair number of functions in this header have historically been inline.
// It is ok to change functions to not be inline if the latency with
// benchmark_model for MobileNet + MobileBERT is unaffected. If such a change
// is made, move the newly non-inlined function declarations to the top of
// this header file.

// Note: you must check that the result is not null:
//
//   const TfLiteTensor* my_tensor = GetInput(context, node, kMyTensorIdx);
//   TF_LITE_ENSURE(context, my_tensor != nullptr);
//
// This is because the index might point to the optional-tensor constant
// (kTfLiteOptionalTensor), in which case there is no tensor to return.
const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index);

// Same as `GetInput` but returns a TfLiteStatus and passes the tensor back
// through an output argument:
//
//   const TfLiteTensor* my_tensor;
//   TF_LITE_ENSURE_OK(context,
//                     GetInputSafe(context, node, kMyTensorIdx, &my_tensor));
//   // my_tensor can be used directly from here onwards; it is not nullptr.
//
// Prefer this variant when binary size is a concern.
TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor);

// Note: you must check that the result is not null:
//
//   TfLiteTensor* my_tensor = GetVariableInput(context, node, kMyTensorIdx);
//   TF_LITE_ENSURE(context, my_tensor != nullptr);
//
// This is because the index might point to the optional-tensor constant
// (kTfLiteOptionalTensor), in which case there is no tensor to return.
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index);

// Note: you must check that the result is not null:
//
//   TfLiteTensor* my_tensor = GetOutput(context, node, kMyTensorIdx);
//   TF_LITE_ENSURE(context, my_tensor != nullptr);
//
// This is because the index might point to the optional-tensor constant
// (kTfLiteOptionalTensor), in which case there is no tensor to return.
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index);

// Same as `GetOutput` but returns a TfLiteStatus and passes the tensor back
// through an output argument:
//
//   TfLiteTensor* my_tensor;
//   TF_LITE_ENSURE_OK(context,
//                     GetOutputSafe(context, node, kMyTensorIdx, &my_tensor));
//   // my_tensor can be used directly from here onwards; it is not nullptr.
//
// Prefer this variant when binary size is a concern.
TfLiteStatus GetOutputSafe(const TfLiteContext* context,
                           const TfLiteNode* node, int index,
                           TfLiteTensor** tensor);

// Note: you must check that the result is not null:
//
//   const TfLiteTensor* my_tensor =
//       GetOptionalInputTensor(context, node, kIdx);
//   TF_LITE_ENSURE(context, my_tensor != nullptr);
//
// This is because the index might point to the optional-tensor constant
// (kTfLiteOptionalTensor), in which case there is no tensor to return.
//
// Deprecated: `GetInput` has the same functionality.
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index);

#ifndef TF_LITE_STATIC_MEMORY
// Note: you must check that the result is not null:
//
//   TfLiteTensor* my_tensor = GetTemporary(context, node, kMyTensorIdx);
//   TF_LITE_ENSURE(context, my_tensor != nullptr);
//
// This is because the index might point to the optional-tensor constant
// (kTfLiteOptionalTensor), in which case there is no tensor to return.
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index);

// Same as `GetTemporary` but returns a TfLiteStatus and passes the tensor
// back through an output argument:
//
//   TfLiteTensor* my_tensor;
//   TF_LITE_ENSURE_OK(context,
//                     GetTemporarySafe(context, node, kMyTensorIdx,
//                                      &my_tensor));
//   // my_tensor can be used directly from here onwards; it is not nullptr.
//
// Prefer this variant when binary size is a concern.
TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor);

// Note: you must check that the result is not null:
//
//   const TfLiteTensor* my_tensor =
//       GetIntermediates(context, node, kMyTensorIdx);
//   TF_LITE_ENSURE(context, my_tensor != nullptr);
//
// This is because the index might point to the optional-tensor constant
// (kTfLiteOptionalTensor), in which case there is no tensor to return.
const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index);

// Same as `GetIntermediates` but returns a TfLiteStatus and passes the tensor
// back through an output argument:
//
//   TfLiteTensor* my_tensor;
//   TF_LITE_ENSURE_OK(context,
//                     GetIntermediatesSafe(context, node, kMyTensorIdx,
//                                          &my_tensor));
//   // my_tensor can be used directly from here onwards; it is not nullptr.
//
// Prefer this variant when binary size is a concern.
TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor);
#endif  // TF_LITE_STATIC_MEMORY

inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
  return t->dims->data[dim];
}
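
// A minimal sketch of how the two helpers above are typically combined to
// walk a tensor's shape (`t` is assumed to be a valid, non-null tensor):
//
//   for (int i = 0; i < NumDimensions(t); ++i) {
//     const int extent = SizeOfDimension(t, i);  // extent of dimension i
//     // ... use extent ...
//   }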

inline int NumInputs(const TfLiteNode* node) {
  return node->inputs == nullptr ? 0 : node->inputs->size;
}
inline int NumOutputs(const TfLiteNode* node) {
  return node->outputs == nullptr ? 0 : node->outputs->size;
}

#ifndef TF_LITE_STATIC_MEMORY
inline int NumIntermediates(const TfLiteNode* node) {
  return node->intermediates->size;
}
#endif  // TF_LITE_STATIC_MEMORY

inline int64_t NumElements(const TfLiteIntArray* dims) {
  int64_t count = 1;
  for (int i = 0; i < dims->size; ++i) {
    count *= dims->data[i];
  }
  return count;
}

inline int64_t NumElements(const TfLiteTensor* t) {
  return NumElements(t->dims);
}

inline int64_t NumElements(const int* dims, int num_dims) {
  int64_t count = 1;
  for (int i = 0; i < num_dims; ++i) {
    count *= dims[i];
  }
  return count;
}
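
// As a worked example, NumElements for dims {2, 3, 4} is 2 * 3 * 4 == 24. A
// zero-dimensional (scalar) tensor yields 1, since `count` starts at 1 and
// the loop body never runs.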

// Determines whether a tensor is constant.
// TODO(b/138199592): Introduce new query which checks for constant OR
// persistent-read-only, which would be useful for most tensor kernels that
// are potentially dynamic based on the input tensor value availability at the
// time of prepare.
inline bool IsConstantTensor(const TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteMmapRo;
}

inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) {
  return IsConstantTensor(tensor) ||
         (tensor->allocation_type == kTfLitePersistentRo);
}

// Determines whether a tensor is dynamic. Note that a tensor can be non-const
// and not dynamic; this specifically checks for the dynamic allocation type.
inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteDynamic;
}

// Sets the tensor to dynamic, so its buffer is (re)allocated during Eval.
inline void SetTensorToDynamic(TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic) {
    tensor->allocation_type = kTfLiteDynamic;
    tensor->data.raw = nullptr;
  }
}
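
// A minimal sketch of the usual pattern in a hypothetical Prepare whose
// output shape depends on runtime input values (`shape_tensor` and the output
// index 0 are assumptions, not part of this header):
//
//   TfLiteTensor* output;
//   TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
//   if (!IsConstantTensor(shape_tensor)) {
//     SetTensorToDynamic(output);  // resize and allocate later, in Eval
//   }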

// Sets the tensor to persistent and read-only.
inline void SetTensorToPersistentRo(TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLitePersistentRo) {
    tensor->allocation_type = kTfLitePersistentRo;
    tensor->data.raw = nullptr;
  }
}

// Determines whether this is a hybrid op, i.e. one with float inputs and
// quantized (int8 or uint8) weights.
inline bool IsHybridOp(const TfLiteTensor* input, const TfLiteTensor* weight) {
  return ((weight->type == kTfLiteUInt8 || weight->type == kTfLiteInt8) &&
          input->type == kTfLiteFloat32);
}
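
// A sketch of how a kernel might dispatch on this in Prepare (the three
// paths shown are illustrative):
//
//   if (IsHybridOp(input, filter)) {
//     // Hybrid path: float activations, quantized weights.
//   } else if (input->type == kTfLiteFloat32) {
//     // Pure float path.
//   } else {
//     // Fully quantized path.
//   }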

// Checks that dimensionality matches and populates OpData for Conv and
// DepthwiseConv.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift);

TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels);
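
// A sketch of a call site in a hypothetical Conv Prepare, assuming an OpData
// struct that stores the computed parameters (all `data->` field names are
// illustrative, not part of this header):
//
//   TF_LITE_ENSURE_OK(
//       context, PopulateConvolutionQuantizationParams(
//                    context, input, filter, bias, output, params->activation,
//                    &data->output_multiplier, &data->output_shift,
//                    &data->output_activation_min,
//                    &data->output_activation_max,
//                    data->per_channel_output_multiplier,
//                    data->per_channel_output_shift, num_channels));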

// Calculates the multiplication factor for a quantized convolution (or
// quantized depthwise convolution) involving the given tensors. Returns an
// error if the scales of the tensors are not compatible.
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier);

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier);
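
// With TFLite's affine quantization (real_value = scale * (q - zero_point)),
// the returned factor is the scale ratio
//
//   multiplier = input_scale * filter_scale / output_scale
//
// which callers typically decompose into a fixed-point multiplier and shift
// for integer kernels.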

// Calculates the useful quantized range of an activation layer given its
// activation tensor.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max);
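
// A minimal usage sketch (`params` is assumed to be the node's builtin data):
//
//   int32_t act_min, act_max;
//   TF_LITE_ENSURE_OK(context, CalculateActivationRangeQuantized(
//                                  context, params->activation, output,
//                                  &act_min, &act_max));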

// Calculates the useful range of an activation layer given its fused
// activation type.
template <typename T>
void CalculateActivationRange(TfLiteFusedActivation activation,
                              T* activation_min, T* activation_max) {
  if (activation == kTfLiteActRelu) {
    *activation_min = 0;
    *activation_max = std::numeric_limits<T>::max();
  } else if (activation == kTfLiteActRelu6) {
    *activation_min = 0;
    *activation_max = 6;
  } else if (activation == kTfLiteActReluN1To1) {
    *activation_min = -1;
    *activation_max = 1;
  } else {
    *activation_min = std::numeric_limits<T>::lowest();
    *activation_max = std::numeric_limits<T>::max();
  }
}
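
// For example, with T = float:
//
//   float act_min, act_max;
//   CalculateActivationRange(kTfLiteActRelu6, &act_min, &act_max);
//   // act_min == 0.0f, act_max == 6.0f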

// Returns true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);

#if !defined(TF_LITE_STATIC_MEMORY)
// Gets the output shape from the input tensor.
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
                                     const TfLiteTensor* input,
                                     TfLiteIntArray** output_shape);

const std::string GetShapeDebugString(const TfLiteIntArray* shape);

#endif  // !defined(TF_LITE_STATIC_MEMORY)

// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the two input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape);

// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the three input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape);
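
// A sketch of the usual Prepare pattern for a broadcasting binary op; note
// that context->ResizeTensor takes ownership of `output_shape`:
//
//   TfLiteIntArray* output_shape = nullptr;
//   TF_LITE_ENSURE_OK(context,
//                     CalculateShapeForBroadcast(context, input1, input2,
//                                                &output_shape));
//   return context->ResizeTensor(context, output, output_shape);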

// Returns the size of the given type in bytes. Returns 0 for strings.
int TfLiteTypeGetSize(TfLiteType type);
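
// For example:
//
//   TfLiteTypeGetSize(kTfLiteFloat32);  // 4
//   TfLiteTypeGetSize(kTfLiteInt8);     // 1
//   TfLiteTypeGetSize(kTfLiteString);   // 0 (strings have no fixed size)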

// Whether the current platform is mobile (Android or iOS).
bool IsMobilePlatform();

// Returns whether there is an unspecified dimension in the tensor's dims
// signature.
bool HasUnspecifiedDimension(const TfLiteTensor* tensor);

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_