/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/kernel_util.h"

#include <math.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <initializer_list>
#include <string>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/testing/util.h"

namespace tflite {
namespace {
using ::testing::ElementsAre;

struct TestContext : public TfLiteContext {
  std::string error;
};

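// Captures formatted error messages into TestContext::error so tests can
// assert on the reported error text.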
void ReportError(TfLiteContext* context, const char* format, ...) {
  TestContext* c = static_cast<TestContext*>(context);
  const size_t kBufferSize = 1024;
  char temp_buffer[kBufferSize];

  va_list args;
  va_start(args, format);
  vsnprintf(temp_buffer, kBufferSize, format, args);
  va_end(args);

  c->error = temp_buffer;
}

class KernelUtilTest : public ::testing::Test {
 public:
  KernelUtilTest() {
    context_.ReportError = ReportError;

    memset(&tensor1_, 0, sizeof(TfLiteTensor));
    memset(&tensor2_, 0, sizeof(TfLiteTensor));
    memset(&tensor3_, 0, sizeof(TfLiteTensor));
    tensor1_.dims = nullptr;
    tensor2_.dims = nullptr;
    tensor3_.dims = nullptr;
    tensor1_.allocation_type = kTfLiteMmapRo;
    tensor2_.allocation_type = kTfLiteMmapRo;
    tensor3_.allocation_type = kTfLiteMmapRo;
  }
  ~KernelUtilTest() override {
    TfLiteTensorFree(&tensor1_);
    TfLiteTensorFree(&tensor2_);
    TfLiteTensorFree(&tensor3_);
  }

  void SetShape(TfLiteTensor* tensor, std::initializer_list<int> dims) {
    TfLiteTensorFree(tensor);
    tensor->dims = TfLiteIntArrayCreate(dims.size());
    int i = 0;
    for (const auto& d : dims) {
      tensor->dims->data[i] = d;
      ++i;
    }
  }

  std::vector<int> GetShape(TfLiteIntArray* dims) {
    std::vector<int> result;
    for (int i = 0; i < dims->size; ++i) {
      result.push_back(dims->data[i]);
    }
    return result;
  }

 protected:
  TestContext context_;
  TfLiteTensor tensor1_;
  TfLiteTensor tensor2_;
  TfLiteTensor tensor3_;
};

TEST_F(KernelUtilTest, SameShapeEmpty) {
  EXPECT_TRUE(HaveSameShapes(&tensor1_, &tensor2_));

  SetShape(&tensor1_, {1, 2, 3});
  EXPECT_FALSE(HaveSameShapes(&tensor1_, &tensor2_));

  SetShape(&tensor2_, {1, 2});
  EXPECT_FALSE(HaveSameShapes(&tensor1_, &tensor2_));

  SetShape(&tensor2_, {1, 2, 3, 4});
  EXPECT_FALSE(HaveSameShapes(&tensor1_, &tensor2_));

  SetShape(&tensor2_, {1, 2, 3});
  EXPECT_TRUE(HaveSameShapes(&tensor1_, &tensor2_));

  SetShape(&tensor2_, {});
  EXPECT_FALSE(HaveSameShapes(&tensor1_, &tensor2_));

  SetShape(&tensor1_, {});
  EXPECT_TRUE(HaveSameShapes(&tensor1_, &tensor2_));
}

TEST_F(KernelUtilTest, BroadcastShapeIncompatibleDim) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {1, 3});
  EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_EQ(output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,2] and [1,3], are not broadcastable.");
}

TEST_F(KernelUtilTest, BroadcastShapeIncompatibleDimWithZero) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 0});
  SetShape(&tensor2_, {1, 3});
  EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_EQ(output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,0] and [1,3], are not broadcastable.");
}

TEST_F(KernelUtilTest, BroadcastShapeOnes) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 1});
  SetShape(&tensor2_, {1, 3});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {1, 1});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeScalars) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(1, 2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {});
  SetShape(&tensor2_, {2});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(2));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeDifferentSizes) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {3, 1, 1});
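  // Shapes are right-aligned for broadcasting, so {1, 2} against {3, 1, 1}
  // broadcasts to {3, 1, 2}.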
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(3, 1, 2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {1, 2, 3, 4});
  SetShape(&tensor2_, {1, 3, 1});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(1, 2, 3, 4));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeWithZero) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {3, 0, 1});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(3, 0, 2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {2, 1, 0});
  SetShape(&tensor2_, {1, 3, 1});
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, &tensor1_,
                                                  &tensor2_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(2, 3, 0));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeIncompatibleDimOnThreeTensors) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {1, 3});
  SetShape(&tensor3_, {1, 4});
  EXPECT_NE(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_EQ(output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,2], [1,3] and [1,4], are not broadcastable.");
}

TEST_F(KernelUtilTest, BroadcastShapeIncompatibleDimWithZeroOnThreeTensors) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 1});
  SetShape(&tensor2_, {1, 3});
  SetShape(&tensor3_, {1, 0});
  EXPECT_NE(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_EQ(output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,1], [1,3] and [1,0], are not broadcastable.");
}

TEST_F(KernelUtilTest, BroadcastShapeOnesOnThreeTensors) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 1});
  SetShape(&tensor2_, {1, 1});
  SetShape(&tensor3_, {1, 3});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {1, 1});
  SetShape(&tensor3_, {1, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {1, 1});
  SetShape(&tensor2_, {1, 4});
  SetShape(&tensor3_, {1, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeScalarsOnThreeTensors) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {});
  SetShape(&tensor3_, {});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(1, 2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {});
  SetShape(&tensor2_, {2});
  SetShape(&tensor3_, {});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {});
  SetShape(&tensor2_, {});
  SetShape(&tensor3_, {3, 2, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(3, 2, 1));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeDifferentSizesOnThreeTensors) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {3, 1, 1});
  SetShape(&tensor3_, {3, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(3, 3, 2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {3, 4});
  SetShape(&tensor2_, {1, 3, 1});
  SetShape(&tensor3_, {1, 2, 1, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(1, 2, 3, 4));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, BroadcastShapeWithZeroOnThreeTensors) {
  TfLiteIntArray* output = nullptr;
  SetShape(&tensor1_, {1, 2});
  SetShape(&tensor2_, {3, 1, 1});
  SetShape(&tensor3_, {0, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(3, 0, 2));
  TfLiteIntArrayFree(output);

  SetShape(&tensor1_, {1, 4});
  SetShape(&tensor2_, {1, 0, 1});
  SetShape(&tensor3_, {1, 2, 1, 1});
  EXPECT_EQ(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, &tensor1_, &tensor2_,
                                       &tensor3_, &output));
  EXPECT_THAT(GetShape(output), ElementsAre(1, 2, 0, 4));
  TfLiteIntArrayFree(output);
}

TEST_F(KernelUtilTest, GetShapeDebugString) {
  TfLiteIntArray* dims0 = TfLiteIntArrayCreate(0);
  EXPECT_EQ("[]", GetShapeDebugString(dims0));
  TfLiteIntArrayFree(dims0);

  TfLiteIntArray* dims1 = TfLiteIntArrayCreate(1);
  dims1->data[0] = 1;
  EXPECT_EQ("[1]", GetShapeDebugString(dims1));
  TfLiteIntArrayFree(dims1);

  TfLiteIntArray* dims2 = TfLiteIntArrayCreate(2);
  dims2->data[0] = 2;
  dims2->data[1] = 3;
  EXPECT_EQ("[2,3]", GetShapeDebugString(dims2));
  TfLiteIntArrayFree(dims2);

  TfLiteIntArray* dims3 = TfLiteIntArrayCreate(3);
  dims3->data[0] = 4;
  dims3->data[1] = 5;
  dims3->data[2] = 6;
  EXPECT_EQ("[4,5,6]", GetShapeDebugString(dims3));
  TfLiteIntArrayFree(dims3);
}

TEST_F(KernelUtilTest, CheckAndPopulate) {
  // Create input.
  TfLiteTensor input = {};
  input.type = kTfLiteInt8;
  input.allocation_type = kTfLiteArenaRw;
  input.dims = TfLiteIntArrayCreate(1);
  input.dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {0.5, 5};
  input.params = input_quant;
  input.quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 0.5;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input.quantization.params = reinterpret_cast<void*>(input_params);

  // Create filter.
  TfLiteTensor filter = {};
  filter.type = kTfLiteInt8;
  filter.allocation_type = kTfLiteArenaRw;
  filter.dims = TfLiteIntArrayCreate(4);
  filter.dims->data[0] = 3;
  filter.dims->data[1] = 4;
  filter.dims->data[2] = 5;
  filter.dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {0.25, 0};
  filter.params = filter_quant;
  filter.quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(3);
  filter_params->scale->data[0] = 0.25;
  filter_params->scale->data[1] = 0.125;
  filter_params->scale->data[2] = 0.25;
  filter_params->zero_point = TfLiteIntArrayCreate(3);
  filter_params->zero_point->data[0] = 0;
  filter_params->zero_point->data[1] = 0;
  filter_params->zero_point->data[2] = 0;
  filter_params->quantized_dimension = 0;
  filter.quantization.params = reinterpret_cast<void*>(filter_params);

  // Create bias.
  TfLiteTensor bias = {};
  bias.type = kTfLiteInt32;
  bias.allocation_type = kTfLiteArenaRw;
  bias.dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {0.125, 9};
  bias.params = bias_quant;
  bias.quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(3);
  bias_params->scale->data[0] = 0.125;
  bias_params->scale->data[1] = 0.0625;
  bias_params->scale->data[2] = 0.125;
  bias_params->zero_point = TfLiteIntArrayCreate(3);
  bias_params->zero_point->data[0] = 11;
  bias_params->zero_point->data[1] = 12;
  bias_params->zero_point->data[2] = 15;
  bias.quantization.params = reinterpret_cast<void*>(bias_params);

  // Create output.
  TfLiteTensor output = {};
  output.type = kTfLiteInt8;
  output.allocation_type = kTfLiteArenaRw;
  output.dims = nullptr;
  TfLiteQuantizationParams output_quant = {0.5, -128};
  output.params = output_quant;
  output.quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 0.5;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output.quantization.params = reinterpret_cast<void*>(output_params);

  // Create call parameters.
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int32_t> per_channel_shift(3);

  // Call and verify results for per channel case.
  EXPECT_EQ(
      kTfLiteOk,
      PopulateConvolutionQuantizationParams(
          &context_, &input, &filter, &bias, &output, kTfLiteActRelu,
          &multiplier, &shift, &output_activation_min, &output_activation_max,
          per_channel_multiplier.data(), per_channel_shift.data()));
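  // The effective per-channel scale is input_scale * filter_scale /
  // output_scale = 0.5 * {0.25, 0.125, 0.25} / 0.5 = {0.25, 0.125, 0.25},
  // which quantizes to multiplier 2^30 (1073741824) with shifts {-1, -2, -1}.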
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-1, -2, -1));

  // Release.
  TfLiteTensorFree(&input);
  TfLiteTensorFree(&filter);
  TfLiteTensorFree(&bias);
  TfLiteTensorFree(&output);
}

TEST_F(KernelUtilTest, CheckAndPopulateShift) {
  // Create input of type kTfLiteUInt8.
  TfLiteTensor input = {};
  input.type = kTfLiteUInt8;
  input.allocation_type = kTfLiteArenaRw;
  input.dims = TfLiteIntArrayCreate(1);
  input.dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {0.5, 5};
  input.params = input_quant;
  input.quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 0.5;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input.quantization.params = reinterpret_cast<void*>(input_params);

  // Create filter of type kTfLiteUInt8.
  TfLiteTensor filter = {};
  filter.type = kTfLiteUInt8;
  filter.allocation_type = kTfLiteArenaRw;
  filter.dims = TfLiteIntArrayCreate(4);
  filter.dims->data[0] = 3;
  filter.dims->data[1] = 4;
  filter.dims->data[2] = 5;
  filter.dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {0.25, 0};
  filter.params = filter_quant;
  filter.quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  // Create scale of size one.
  filter_params->scale = TfLiteFloatArrayCreate(1);
  filter_params->scale->data[0] = 0.25;
  filter_params->zero_point = TfLiteIntArrayCreate(1);
  filter_params->zero_point->data[0] = 0;
  filter_params->quantized_dimension = 0;
  filter.quantization.params = reinterpret_cast<void*>(filter_params);

  // Create bias for kTfLiteUInt8.
  TfLiteTensor bias = {};
  bias.type = kTfLiteUInt8;
  bias.allocation_type = kTfLiteArenaRw;
  bias.dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {0.125, 9};
  bias.params = bias_quant;
  bias.quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(3);
  bias_params->scale->data[0] = 0.125;
  bias_params->scale->data[1] = 0.0625;
  bias_params->scale->data[2] = 0.125;
  bias_params->zero_point = TfLiteIntArrayCreate(3);
  bias_params->zero_point->data[0] = 11;
  bias_params->zero_point->data[1] = 12;
  bias_params->zero_point->data[2] = 15;
  bias.quantization.params = reinterpret_cast<void*>(bias_params);

  // Create output for kTfLiteUInt8.
  TfLiteTensor output = {};
  output.type = kTfLiteUInt8;
  output.allocation_type = kTfLiteArenaRw;
  output.dims = nullptr;
  TfLiteQuantizationParams output_quant = {0.5, 128};
  output.params = output_quant;
  output.quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 0.5;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = 128;
  output.quantization.params = reinterpret_cast<void*>(output_params);

  // Create call parameters.
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);

  // Call and verify results for per channel case.
  EXPECT_EQ(
      kTfLiteOk,
      PopulateConvolutionQuantizationParams(
          &context_, &input, &filter, &bias, &output, kTfLiteActRelu,
          &multiplier, &shift, &output_activation_min, &output_activation_max,
          per_channel_multiplier.data(), per_channel_shift.data(), 3));
  // The filter scale has size one but there are three channels, so the single
  // quantized multiplier (1073741824) is replicated across all three channels.
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-1, -1, -1));
  EXPECT_EQ(shift, 1);
  EXPECT_EQ(multiplier, 1073741824);

  // Release.
  TfLiteTensorFree(&input);
  TfLiteTensorFree(&filter);
  TfLiteTensorFree(&bias);
  TfLiteTensorFree(&output);
}

#ifndef __APPLE__  // Some Apple toolchains don't support std::ldexp
TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
  // Create input.
  TfLiteTensor input = {};
  input.type = kTfLiteInt8;
  input.allocation_type = kTfLiteArenaRw;
  input.dims = TfLiteIntArrayCreate(1);
  input.dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {1, 5};
  input.params = input_quant;
  input.quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 1;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input.quantization.params = reinterpret_cast<void*>(input_params);

  // Create filter.
  TfLiteTensor filter = {};
  filter.type = kTfLiteInt8;
  filter.allocation_type = kTfLiteArenaRw;
  filter.dims = TfLiteIntArrayCreate(4);
  filter.dims->data[0] = 3;
  filter.dims->data[1] = 4;
  filter.dims->data[2] = 5;
  filter.dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
  filter.params = filter_quant;
  filter.quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(3);
  filter_params->scale->data[0] = std::ldexp(1.0f, -31);
  filter_params->scale->data[1] = std::ldexp(1.0f, -32);
  filter_params->scale->data[2] = std::ldexp(1.0f, -33);
  filter_params->zero_point = TfLiteIntArrayCreate(3);
  filter_params->zero_point->data[0] = 0;
  filter_params->zero_point->data[1] = 0;
  filter_params->zero_point->data[2] = 0;
  filter_params->quantized_dimension = 0;
  filter.quantization.params = reinterpret_cast<void*>(filter_params);

  // Create bias.
  TfLiteTensor bias = {};
  bias.type = kTfLiteInt32;
  bias.allocation_type = kTfLiteArenaRw;
  bias.dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
  bias.params = bias_quant;
  bias.quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(3);
  bias_params->scale->data[0] = std::ldexp(1.0f, -31);
  bias_params->scale->data[1] = std::ldexp(1.0f, -32);
  bias_params->scale->data[2] = std::ldexp(1.0f, -33);
  bias_params->zero_point = TfLiteIntArrayCreate(3);
  bias_params->zero_point->data[0] = 11;
  bias_params->zero_point->data[1] = 12;
  bias_params->zero_point->data[2] = 15;
  bias.quantization.params = reinterpret_cast<void*>(bias_params);

  // Create output.
  TfLiteTensor output = {};
  output.type = kTfLiteInt8;
  output.allocation_type = kTfLiteArenaRw;
  output.dims = nullptr;
  TfLiteQuantizationParams output_quant = {1, -128};
  output.params = output_quant;
  output.quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output.quantization.params = reinterpret_cast<void*>(output_params);

  // Create call parameters.
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);

  // Call and verify results for per channel case.
  EXPECT_EQ(
      kTfLiteOk,
      PopulateConvolutionQuantizationParams(
          &context_, &input, &filter, &bias, &output, kTfLiteActRelu,
          &multiplier, &shift, &output_activation_min, &output_activation_max,
          per_channel_multiplier.data(), per_channel_shift.data(), 3));
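  // The effective scales are 2^-31, 2^-32 and 2^-33. The first two quantize to
  // multiplier 2^30 with shifts -30 and -31; the last is too small to
  // represent, so its multiplier and shift collapse to zero.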
  EXPECT_THAT(per_channel_multiplier, ElementsAre(1073741824, 1073741824, 0));
  EXPECT_THAT(per_channel_shift, ElementsAre(-30, -31, 0));

  // Release.
  TfLiteTensorFree(&input);
  TfLiteTensorFree(&filter);
  TfLiteTensorFree(&bias);
  TfLiteTensorFree(&output);
}
#endif

TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
  // Create input.
  TfLiteTensor input = {};
  input.type = kTfLiteUInt8;
  input.allocation_type = kTfLiteArenaRw;
  input.dims = TfLiteIntArrayCreate(1);
  input.dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {1, 5};
  input.params = input_quant;
  input.quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 1;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input.quantization.params = reinterpret_cast<void*>(input_params);

  // Create filter.
  TfLiteTensor filter = {};
  filter.type = kTfLiteUInt8;
  filter.allocation_type = kTfLiteArenaRw;
  filter.dims = TfLiteIntArrayCreate(4);
  filter.dims->data[0] = 3;
  filter.dims->data[1] = 4;
  filter.dims->data[2] = 5;
  filter.dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
  filter.params = filter_quant;
  filter.quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(1);
  int32_t two_pow_neg_31 = 0x30000000;  // 2^-31 so shift = -30.
  filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
  filter_params->zero_point = TfLiteIntArrayCreate(1);
  filter_params->zero_point->data[0] = 0;
  filter_params->quantized_dimension = 0;
  filter.quantization.params = reinterpret_cast<void*>(filter_params);

  // Create bias.
  TfLiteTensor bias = {};
  bias.type = kTfLiteInt32;
  bias.allocation_type = kTfLiteArenaRw;
  bias.dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
  bias.params = bias_quant;
  bias.quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(1);
  bias_params->scale->data[0] = 4.6566129e-10;  // 2^-31
  bias_params->zero_point = TfLiteIntArrayCreate(1);
  bias_params->zero_point->data[0] = 11;
  bias.quantization.params = reinterpret_cast<void*>(bias_params);

  // Create output.
  TfLiteTensor output = {};
  output.type = kTfLiteUInt8;
  output.allocation_type = kTfLiteArenaRw;
  output.dims = nullptr;
  TfLiteQuantizationParams output_quant = {1, -128};
  output.params = output_quant;
  output.quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output.quantization.params = reinterpret_cast<void*>(output_params);

  // Create call parameters.
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);

  // Call and verify results for per channel case.
  EXPECT_EQ(
      kTfLiteOk,
      PopulateConvolutionQuantizationParams(
          &context_, &input, &filter, &bias, &output, kTfLiteActRelu,
          &multiplier, &shift, &output_activation_min, &output_activation_max,
          per_channel_multiplier.data(), per_channel_shift.data(), 3));
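  // The single filter scale (2^-31) is applied to all three channels, giving
  // an effective scale of 1 * 2^-31 / 1 = 2^-31 per channel, i.e. multiplier
  // 2^30 with shift -30.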
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));

  // Release.
  TfLiteTensorFree(&input);
  TfLiteTensorFree(&filter);
  TfLiteTensorFree(&bias);
  TfLiteTensorFree(&output);
}

TEST_F(KernelUtilTest, CheckAndPopulateWithoutBias) {
  // Create input.
  TfLiteTensor input = {};
  input.type = kTfLiteUInt8;
  input.allocation_type = kTfLiteArenaRw;
  input.dims = TfLiteIntArrayCreate(1);
  input.dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {1, 5};
  input.params = input_quant;
  input.quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 1;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input.quantization.params = reinterpret_cast<void*>(input_params);

  // Create filter.
  TfLiteTensor filter = {};
  filter.type = kTfLiteUInt8;
  filter.allocation_type = kTfLiteArenaRw;
  filter.dims = TfLiteIntArrayCreate(4);
  filter.dims->data[0] = 3;
  filter.dims->data[1] = 4;
  filter.dims->data[2] = 5;
  filter.dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
  filter.params = filter_quant;
  filter.quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(1);
  int32_t two_pow_neg_31 = 0x30000000;  // 2^-31 so shift = -30.
  filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
  filter_params->zero_point = TfLiteIntArrayCreate(1);
  filter_params->zero_point->data[0] = 0;
  filter_params->quantized_dimension = 0;
  filter.quantization.params = reinterpret_cast<void*>(filter_params);

  // Create output.
  TfLiteTensor output = {};
  output.type = kTfLiteUInt8;
  output.allocation_type = kTfLiteArenaRw;
  output.dims = nullptr;
  TfLiteQuantizationParams output_quant = {1, -128};
  output.params = output_quant;
  output.quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output.quantization.params = reinterpret_cast<void*>(output_params);

  // Create call parameters.
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);

  // Call and verify results for per channel case.
  EXPECT_EQ(
      kTfLiteOk,
      PopulateConvolutionQuantizationParams(
          &context_, &input, &filter, nullptr, &output, kTfLiteActRelu,
          &multiplier, &shift, &output_activation_min, &output_activation_max,
          per_channel_multiplier.data(), per_channel_shift.data(), 3));
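  // Passing nullptr for the bias skips the bias-related checks; the
  // per-channel results match the biased uint8 case above.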
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));

  // Release.
  TfLiteTensorFree(&input);
  TfLiteTensorFree(&filter);
  TfLiteTensorFree(&output);
}

TEST_F(KernelUtilTest, ActivationRangeQuantizedOverflow) {
  // Create output.
  TfLiteTensor output = {};
  output.type = kTfLiteUInt8;
  output.allocation_type = kTfLiteArenaRw;
  output.dims = nullptr;
  TfLiteQuantizationParams output_quant = {1e-10, -128};
  output.params = output_quant;
  output.quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output.quantization.params = reinterpret_cast<void*>(output_params);

  // For bounded activations, a scale value that is too small may cause integer
  // overflow. Make sure the overflow error is handled gracefully.
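  // With a scale of 1e-10, quantizing the ReLU6 upper bound (6.0f / 1e-10)
  // overflows int32, while plain ReLU only clamps at the quantized zero point
  // and still succeeds.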
  int32_t act_min, act_max;
  ASSERT_EQ(kTfLiteOk,
            CalculateActivationRangeQuantized(&context_, kTfLiteActRelu,
                                              &output, &act_min, &act_max));
  ASSERT_NE(kTfLiteOk,
            CalculateActivationRangeQuantized(&context_, kTfLiteActRelu6,
                                              &output, &act_min, &act_max));
  EXPECT_TRUE(absl::StrContains(
      context_.error, "no_integer_overflow_from_quantization was not true"));
  ASSERT_NE(kTfLiteOk,
            CalculateActivationRangeQuantized(&context_, kTfLiteActReluN1To1,
                                              &output, &act_min, &act_max));
  EXPECT_TRUE(absl::StrContains(
      context_.error, "no_integer_overflow_from_quantization was not true"));

  // Release.
  TfLiteTensorFree(&output);
}

TEST_F(KernelUtilTest, IsMobilePlatform) {
  // Note: This isn't meant to be exhaustive, as that would require replicating
  // the method's implementation, but it is a basic smoke check.
#if defined(__ANDROID__)
  EXPECT_TRUE(IsMobilePlatform());
#elif defined(__linux__)
  EXPECT_FALSE(IsMobilePlatform());
#elif defined(_WIN32)
  EXPECT_FALSE(IsMobilePlatform());
#endif
}

TEST_F(KernelUtilTest, HasUnspecifiedDimension) {
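  // A -1 entry in dims_signature marks that dimension as unspecified.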
  TfLiteTensor tensor;
  TfLiteIntArray* shape_sig = TfLiteIntArrayCreate(3);
  shape_sig->data[0] = 1;
  shape_sig->data[1] = -1;
  shape_sig->data[2] = 3;
  tensor.dims_signature = shape_sig;

  EXPECT_TRUE(HasUnspecifiedDimension(&tensor));

  shape_sig->data[1] = 2;
  EXPECT_FALSE(HasUnspecifiedDimension(&tensor));

  TfLiteIntArrayFree(shape_sig);
}

}  // namespace
}  // namespace tflite