xref: /aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/elementwise_test.cc (revision b6fb3261f9314811a0f4371741dbb8839866f948)
1 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
24 
25 namespace tflite {
26 namespace {
27 
28 using ::testing::ElementsAreArray;
29 
// Shared base for the element-wise op test models below. Each model has
// exactly one input and one output tensor; this base just exposes their
// tensor indices to the tests.
class ElementWiseOpBaseModel : public SingleOpModel {
 public:
  // Tensor index of the model's single input.
  int input() const { return input_; }
  // Tensor index of the model's single output.
  int output() const { return output_; }

 protected:
  // Populated by the derived-class constructors via AddInput()/AddOutput().
  int input_;
  int output_;
};
39 
// Test model with a single float32 input and float32 output for the given
// element-wise builtin op (SIN, COS, LOG, SQRT, RSQRT, SQUARE, ...).
class ElementWiseOpFloatModel : public ElementWiseOpBaseModel {
 public:
  ElementWiseOpFloatModel(BuiltinOperator op,
                          std::initializer_list<int> input_shape) {
    input_ = AddInput(TensorType_FLOAT32);
    output_ = AddOutput(TensorType_FLOAT32);
    // Element-wise ops carry no builtin options.
    SetBuiltinOp(op, BuiltinOptions_NONE, 0);
    BuildInterpreter({input_shape});
  }
};
50 
// Test model with quantized (int8/int16) input and output tensors for the
// given element-wise builtin op, plus helpers to quantize test inputs and
// dequantize the produced outputs.
class ElementWiseOpQuantizedModel : public ElementWiseOpBaseModel {
 public:
  // `input_tensor_data`/`output_tensor_data` describe type, shape and
  // quantization parameters; INT16 tensors are rewritten to symmetric
  // scaling first (see SymmetricInt16Scaling below).
  ElementWiseOpQuantizedModel(BuiltinOperator op, TensorData input_tensor_data,
                              TensorData output_tensor_data) {
    input_ = AddInput(SymmetricInt16Scaling(input_tensor_data));
    output_ = AddOutput(SymmetricInt16Scaling(output_tensor_data));
    SetBuiltinOp(op, BuiltinOptions_NONE, 0);
    BuildInterpreter({input_tensor_data.shape});
  }

  // Quantizes `data` with a scale/zero-point derived from the data itself
  // (asymmetric quantization) and writes the result into tensor `index`.
  // NOTE(review): the quantized values are always produced in an int8_t
  // buffer and then reinterpreted as T, so this looks safe only for
  // T = int8_t (all current callers) — confirm before using wider types.
  template <typename T>
  void AsymmetricQuantizeAndPopulate(int index,
                                     const std::vector<float>& data) {
    std::vector<int8_t> q(data.size());
    float scaling_factor;
    int zero_point;
    tensor_utils::AsymmetricQuantizeFloats(data.data(), data.size(), q.data(),
                                           &scaling_factor, &zero_point);
    PopulateTensor<T>(index, /*offset=*/0, reinterpret_cast<T*>(q.data()),
                      reinterpret_cast<T*>(q.data() + q.size()));
  }

  // Reads tensor `index` as type T and dequantizes each element using the
  // tensor's own affine quantization parameters (first scale/zero-point).
  template <typename T>
  std::vector<float> ExtractDequantVector(int index) {
    auto vec = ExtractVector<T>(index);
    TfLiteTensor* t = interpreter_->tensor(index);
    auto* affine_quantization =
        reinterpret_cast<TfLiteAffineQuantization*>(t->quantization.params);
    float scaling_factor = affine_quantization->scale->data[0];
    int zero_point = affine_quantization->zero_point->data[0];
    std::vector<float> output;
    for (const auto& v : vec) {
      output.push_back((static_cast<T>(v) - zero_point) * scaling_factor);
    }
    return output;
  }

 private:
  // Symmetric range and null zero-point is required for INT16 tensors. As
  // SingleOpModel::QuantizationParams calculates the scale on an asymmetric
  // base [int_type::min, int_type::max], manually calculate the scale on a
  // symmetric range [int_type::min+1, int_type::max] to ensure a null
  // zero-point. Mutates `tensor` in place; min/max are zeroed so the model
  // uses the explicit scale/zero_point instead.
  TensorData& SymmetricInt16Scaling(TensorData& tensor) {
    if (tensor.type == TensorType_INT16) {
      // Requires the caller to pass a symmetric [-max, max] range.
      CHECK_EQ(std::abs(tensor.min), tensor.max);
      tensor.scale = tensor.max / std::numeric_limits<int16_t>::max();
      tensor.zero_point = 0;
      tensor.min = 0;
      tensor.max = 0;
    }

    return tensor;
  }
};
106 
// Test model with a single boolean input and boolean output for the given
// element-wise builtin op (used below for LOGICAL_NOT).
class ElementWiseOpBoolModel : public ElementWiseOpBaseModel {
 public:
  ElementWiseOpBoolModel(BuiltinOperator op,
                         std::initializer_list<int> input_shape) {
    input_ = AddInput(TensorType_BOOL);
    output_ = AddOutput(TensorType_BOOL);
    // Element-wise ops carry no builtin options.
    SetBuiltinOp(op, BuiltinOptions_NONE, 0);
    BuildInterpreter({input_shape});
  }
};
117 
// Returns the quantization step (scale) for integer type T covering the
// real-valued range [min, max], i.e. the width of one quantized bucket.
template <typename T>
float GetQuantizationStep(float min, float max) {
  const auto bucket_count =
      static_cast<float>(std::numeric_limits<T>::max()) -
      static_cast<float>(std::numeric_limits<T>::min());
  return (max - min) / bucket_count;
}
124 
// SIN on float32: sin(0) == sin(+/-pi) == 0 and sin(1) ~= 0.84147; the
// output shape must match the input shape.
TEST(ElementWise, Sin) {
  ElementWiseOpFloatModel model(BuiltinOperator_SIN, {1, 1, 4, 1});
  model.PopulateTensor<float>(model.input(), {0, 3.1415926, -3.1415926, 1});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray(ArrayFloatNear({0, 0, 0, 0.84147})));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
133 
// COS on float32: cos(0) == 1, cos(+/-pi) == -1, cos(1) ~= 0.54030.
TEST(ElementWise, Cos) {
  ElementWiseOpFloatModel model(BuiltinOperator_COS, {1, 1, 4, 1});
  model.PopulateTensor<float>(model.input(), {0, 3.1415926, -3.1415926, 1});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray(ArrayFloatNear({1, -1, -1, 0.54030})));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
142 
// LOG on float32: log(1) == 0 and log(pi) ~= 1.14473.
TEST(ElementWise, Log) {
  ElementWiseOpFloatModel model(BuiltinOperator_LOG, {1, 1, 4, 1});
  model.PopulateTensor<float>(model.input(), {1, 3.1415926, 1, 1});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray(ArrayFloatNear({0, 1.14473, 0, 0})));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
151 
// ABS on float32: negatives are flipped, non-negatives pass through.
TEST(ElementWise, Abs) {
  ElementWiseOpFloatModel model(BuiltinOperator_ABS, {1, 2, 4, 1});
  model.PopulateTensor<float>(model.input(),
                              {
                                  0.f, -6.2f, 2.f, 4.f,  //
                                  3.f, -2.f, 10.f, 1.f,  //
                              });
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray({
                  0.f, 6.2f, 2.f, 4.f,  //
                  3.f, 2.f, 10.f, 1.f,  //
              }));
}
164 
// ABS on asymmetrically quantized int8: input range is derived from the data
// ([-142, 113]), output range is [0, 142]; the dequantized result must match
// |x| within one input quantization step.
TEST(ElementWise, AbsInt8) {
  const std::vector<float> values = {15., 46., 78., -142., -1., -17., -49., 113.};
  std::vector<float> expected(values.size());
  std::transform(values.begin(), values.end(), expected.begin(),
                 [](float v) { return std::abs(v); });
  const auto minmax = std::minmax_element(values.begin(), values.end());
  const float abs_max = std::max(std::abs(*minmax.first), *minmax.second);
  const float input_scale = (*minmax.second - *minmax.first) / 255.0;
  const float output_scale = abs_max / 255.0;
  const int input_zero_point = 127 - *minmax.second;
  const int output_zero_point = -128;
  ElementWiseOpQuantizedModel model(
      BuiltinOperator_ABS,
      {TensorType_INT8,
       {1, 8},
       *minmax.first,
       *minmax.second,
       input_scale,
       input_zero_point,
       true,
       {input_scale},
       {input_zero_point}},
      {TensorType_INT8, {1, 8}, 0, abs_max, output_scale, output_zero_point});
  model.AsymmetricQuantizeAndPopulate<int8_t>(model.input(), values);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractDequantVector<int8_t>(model.output()),
              ElementsAreArray(ArrayFloatNear(expected, input_scale)));
}
194 
// ABS on quantized int8 where the output reuses the input's scale and
// zero-point (exercises the kernel's same-scale fast path).
TEST(ElementWise, AbsSameScaleInt8) {
  const std::vector<float> values = {15., 46., 78., -142., -1., -17., -49., 113.};
  std::vector<float> expected(values.size());
  std::transform(values.begin(), values.end(), expected.begin(),
                 [](float v) { return std::abs(v); });
  const auto minmax = std::minmax_element(values.begin(), values.end());
  const float abs_max = std::max(std::abs(*minmax.first), *minmax.second);
  const float input_scale = (*minmax.second - *minmax.first) / 255.0;
  const int input_zero_point = 127 - *minmax.second;
  ElementWiseOpQuantizedModel model(
      BuiltinOperator_ABS,
      {TensorType_INT8,
       {1, 8},
       *minmax.first,
       *minmax.second,
       input_scale,
       input_zero_point,
       true,
       {input_scale},
       {input_zero_point}},
      {TensorType_INT8, {1, 8}, 0, abs_max, input_scale, input_zero_point});
  model.AsymmetricQuantizeAndPopulate<int8_t>(model.input(), values);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractDequantVector<int8_t>(model.output()),
              ElementsAreArray(ArrayFloatNear(expected, input_scale)));
}
222 
// ABS on symmetrically quantized int16 ([-142, 142] in, [-150, 150] out);
// the tolerance is one quantization step of the output range.
TEST(ElementWise, AbsInt16) {
  const float tolerance = GetQuantizationStep<int16_t>(-150, 150);
  const std::vector<float> values = {15., 46., 78., -142., -1., -17., -49., 113.};
  std::vector<float> expected(values.size());
  std::transform(values.begin(), values.end(), expected.begin(),
                 [](float v) { return std::abs(v); });
  ElementWiseOpQuantizedModel model(BuiltinOperator_ABS,
                                    {TensorType_INT16, {1, 8}, -142, 142},
                                    {TensorType_INT16, {1, 8}, -150, 150});
  model.QuantizeAndPopulate<int16_t>(model.input(), values);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractDequantVector<int16_t>(model.output()),
              ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
238 
// SQRT on float32: sqrt(0)=0, sqrt(1)=1, sqrt(2)~=1.41421, sqrt(4)=2.
TEST(ElementWise, Sqrt) {
  ElementWiseOpFloatModel model(BuiltinOperator_SQRT, {1, 1, 4, 1});
  model.PopulateTensor<float>(model.input(), {0, 1, 2, 4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray(ArrayFloatNear({0, 1, 1.41421, 2})));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
247 
// RSQRT on float32: 1/sqrt(x) for positive inputs.
TEST(ElementWise, Rsqrt) {
  ElementWiseOpFloatModel model(BuiltinOperator_RSQRT, {1, 1, 4, 1});
  model.PopulateTensor<float>(model.input(), {1, 2, 4, 9});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray(ArrayFloatNear({1, 0.7071, 0.5, 0.33333})));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
256 
// RSQRT on quantized int8 with strictly positive inputs; the dequantized
// result must match 1/sqrt(x) within one input quantization step.
TEST(ElementWise, RsqrtInt8) {
  const std::vector<float> values = {15., 46., 78., 142., 1., 17., 49., 113.};
  std::vector<float> expected(values.size());
  std::transform(values.begin(), values.end(), expected.begin(),
                 [](float v) { return 1.f / std::sqrt(v); });
  const float input_scale = 142.0 / 255.0;
  const float output_scale = 1.0 / 255.0;
  const int32_t zero_point = -128;
  ElementWiseOpQuantizedModel model(BuiltinOperator_RSQRT,
                                    {TensorType_INT8,
                                     {1, 8},
                                     0,
                                     142.0,
                                     input_scale,
                                     zero_point,
                                     true,
                                     {input_scale},
                                     {zero_point}},
                                    {TensorType_INT8,
                                     {1, 8},
                                     0,
                                     1.0,
                                     output_scale,
                                     zero_point,
                                     true,
                                     {output_scale},
                                     {zero_point}});
  model.QuantizeAndPopulate<int8_t>(model.input(), values);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractDequantVector<int8_t>(model.output()),
              ElementsAreArray(ArrayFloatNear(expected, input_scale)));
}
290 
// RSQRT on quantized int8 with an input close to zero (0.1): the output
// range is widened to [0, 3.16] (~1/sqrt(0.1)) to keep the result in range.
TEST(ElementWise, RsqrtCloseTo0Int8) {
  const std::vector<float> values = {15., 46., 78., 142., 0.1, 1., 49., 113.};
  std::vector<float> expected(values.size());
  std::transform(values.begin(), values.end(), expected.begin(),
                 [](float v) { return 1.f / std::sqrt(v); });
  const float input_scale = 142.0 / 255.0;
  const float output_scale = 3.16 / 255.0;
  const int32_t zero_point = -128;
  ElementWiseOpQuantizedModel model(BuiltinOperator_RSQRT,
                                    {TensorType_INT8,
                                     {1, 8},
                                     0,
                                     142.0,
                                     input_scale,
                                     zero_point,
                                     true,
                                     {input_scale},
                                     {zero_point}},
                                    {TensorType_INT8,
                                     {1, 8},
                                     0,
                                     3.16,
                                     output_scale,
                                     zero_point,
                                     true,
                                     {output_scale},
                                     {zero_point}});
  model.QuantizeAndPopulate<int8_t>(model.input(), values);
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractDequantVector<int8_t>(model.output()),
              ElementsAreArray(ArrayFloatNear(expected, input_scale)));
}
324 
// RSQRT on quantized int8 input containing a negative value (-49):
// 1/sqrt(x) is undefined there, so Invoke() must fail with kTfLiteError
// instead of producing output.
TEST(ElementWise, RsqrtNanInt8) {
  // Note: no reference output is computed here (unlike the other RSQRT
  // tests) because the invocation is expected to fail before producing any.
  std::vector<float> data = {15., 46., 78., 142., 1., 17., -49., 113.};
  float kInputScale = 142.0 / 127.0;
  float kOutputScale = 1.0 / 255.0;
  int32_t input_zero_point = 0;
  int32_t output_zero_point = -128;
  ElementWiseOpQuantizedModel m(BuiltinOperator_RSQRT,
                                {TensorType_INT8,
                                 {1, 8},
                                 0,
                                 142.0,
                                 kInputScale,
                                 input_zero_point,
                                 true,
                                 {kInputScale},
                                 {input_zero_point}},
                                {TensorType_INT8,
                                 {1, 8},
                                 0,
                                 1.0,
                                 kOutputScale,
                                 output_zero_point,
                                 true,
                                 {kOutputScale},
                                 {output_zero_point}});
  m.QuantizeAndPopulate<int8_t>(m.input(), data);
  // EXPECT_EQ (not EXPECT_THAT with a bare value) for consistency with the
  // status checks in the rest of this file.
  EXPECT_EQ(m.Invoke(), kTfLiteError);
}
357 
// SQUARE on float32: x*x, including a fractional and a negative input.
TEST(ElementWise, Square) {
  ElementWiseOpFloatModel model(BuiltinOperator_SQUARE, {1, 1, 4, 1});
  model.PopulateTensor<float>(model.input(), {1, 2, 0.5, -3.0});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<float>(model.output()),
              ElementsAreArray(ArrayFloatNear({1, 4.0, 0.25, 9.0})));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
366 
// LOGICAL_NOT on a boolean tensor: every element is inverted.
TEST(ElementWise, LogicalNot) {
  ElementWiseOpBoolModel model(BuiltinOperator_LOGICAL_NOT, {1, 1, 4, 1});
  model.PopulateTensor<bool>(model.input(), {true, false, true, false});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.ExtractVector<bool>(model.output()),
              ElementsAreArray({false, true, false, true}));
  EXPECT_THAT(model.GetTensorShape(model.output()),
              ElementsAreArray({1, 1, 4, 1}));
}
375 
376 }  // namespace
377 }  // namespace tflite
378