/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stddef.h>
#include <stdint.h>

#include <complex>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace {

using ::testing::ElementsAreArray;

class BaseMulOpModel : public SingleOpModel {
 public:
  BaseMulOpModel(const TensorData& input1, const TensorData& input2,
                 const TensorData& output,
                 ActivationFunctionType activation_type) {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_MUL, BuiltinOptions_MulOptions,
                 CreateMulOptions(builder_, activation_type).Union());
    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
  }

  int input1() { return input1_; }
  int input2() { return input2_; }

 protected:
  int input1_;
  int input2_;
  int output_;
};

class FloatMulOpModel : public BaseMulOpModel {
 public:
  using BaseMulOpModel::BaseMulOpModel;

  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};

class ComplexMulOpModel : public BaseMulOpModel {
 public:
  using BaseMulOpModel::BaseMulOpModel;

  std::vector<std::complex<float>> GetOutput() {
    return ExtractVector<std::complex<float>>(output_);
  }
};

class IntegerMulOpModel : public BaseMulOpModel {
 public:
  using BaseMulOpModel::BaseMulOpModel;

  std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
};
// For quantized Mul, the error shouldn't exceed (2*step + step^2).
// The params min=-1.0 & max=1.0 are used in the following tests.
// The tolerance value is ~0.0157.
const float kQuantizedStep = 2.0 / 255.0;
const float kQuantizedTolerance =
    2.0 * kQuantizedStep + kQuantizedStep * kQuantizedStep;
const float kQuantizedStepInt16 = 2.0 / 32767.0;
const float kQuantizedToleranceInt16 =
    2.0 * kQuantizedStepInt16 + kQuantizedStepInt16 * kQuantizedStepInt16;
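// Sanity check of the formula above: kQuantizedStep = 2/255 ≈ 0.00784, so the
// 8-bit tolerance is 2 * 0.00784 + 0.00784^2 ≈ 0.0157, while the int16 step
// of 2/32767 ≈ 6.1e-5 yields a tolerance of ≈ 1.2e-4.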

class QuantizedMulOpModel : public BaseMulOpModel {
 public:
  using BaseMulOpModel::BaseMulOpModel;

  template <typename integer_dtype>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
                                     GetScale(output_), GetZeroPoint(output_));
  }

  std::vector<float> GetDequantizedOutputInt16() {
    return Dequantize<int16_t>(ExtractVector<int16_t>(output_),
                               GetScale(output_), GetZeroPoint(output_));
  }
};
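// Note: Dequantize maps each quantized value q back to real space using the
// affine relation real = scale * (q - zero_point), so the quantized tests
// below compare float expectations against dequantized outputs.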

TEST(FloatMulOpTest, NoActivation) {
  FloatMulOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {1, 2, 2, 1}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
}

TEST(FloatMulOpTest, ActivationRELU_N1_TO_1) {
  FloatMulOpModel m(
      {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
      {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
  m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 5});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
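  // The raw elementwise products are {-0.2, 0.04, 0.21, 4.0}; RELU_N1_TO_1
  // clamps each result to [-1, 1], so the last product becomes 1.0.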
  EXPECT_THAT(m.GetOutput(),
              ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 1.0})));
}

TEST(FloatMulOpTest, VariousInputShapes) {
  const std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    FloatMulOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 1.1, 0.1});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    EXPECT_THAT(
        m.GetOutput(),
        ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4, 1.21, 0.2})))
        << "With shape number " << i;
  }
}

TEST(FloatMulOpTest, WithScalarBroadcast) {
  const std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    FloatMulOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {}},  // always a scalar
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
    m.PopulateTensor<float>(m.input2(), {0.1});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    EXPECT_THAT(
        m.GetOutput(),
        ElementsAreArray(ArrayFloatNear({-0.2, 0.02, 0.07, 0.08, 0.11, 0.2})))
        << "With shape number " << i;
  }
}

TEST(FloatMulOpTest, WithBroadcast) {
  const std::vector<std::vector<int>> test_shapes = {
      {2, 4}, {2, 1, 4}, {1, 2, 4}, {1, 2, 1, 4}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    FloatMulOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {4}},  // always a vector of 4
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(),
                            {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0, 1.1, 0.8});
    m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.4});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    EXPECT_THAT(m.GetOutput(),
                ElementsAreArray(ArrayFloatNear(
                    {-0.2, 0.04, 0.21, 0.32, 0.11, 0.4, 0.33, 0.32})))
        << "With shape number " << i;
  }
}

TEST(FloatMulOpTest, MixedBroadcast) {
  const std::vector<int> base_shape = {2, 3, 1, 2};
  const std::vector<std::vector<int>> test_shapes = {
      {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
  const std::vector<std::vector<float>> test_outputs = {
      {-0.06f, 0.69f,  0.12f,  1.15f, -0.30f, 2.07f,  0.18f,  0.15f, -0.36f,
       0.25f,  0.90f,  0.45f,  0.16f, -0.33f, -0.32f, -0.55f, 0.80f, -0.99f,
       0.24f,  0.84f,  -0.48f, 1.40f, 1.20f,  2.52f,  -0.32f, 0.00f, 0.64f,
       0.00f,  -1.60f, 0.00f,  0.14f, -0.66f, -0.28f, -1.10f, 0.70f, -1.98f},
      {-0.06f, 0.69f, -0.36f, 0.25f, 0.80f, -0.99f, 0.24f, 0.84f, 0.64f, 0.00f,
       0.70f, -1.98f},
      {-0.06f, 0.46f,  -0.09f, 0.69f, 0.12f,  -0.92f, 0.18f,  0.10f,  0.27f,
       0.15f,  -0.36f, -0.20f, 0.16f, -0.22f, 0.24f,  -0.33f, -0.32f, 0.44f,
       0.60f,  1.40f,  1.20f,  2.80f, 1.08f,  2.52f,  -0.80f, 0.00f,  -1.60f,
       0.00f,  -1.44f, 0.00f,  0.35f, -1.10f, 0.70f,  -2.20f, 0.63f,  -1.98f},
      {-0.06f, 0.46f, 0.27f, 0.15f, -0.32f, 0.44f, 0.60f, 1.40f, -1.60f, 0.00f,
       0.63f, -1.98f}};
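  // One worked example of the broadcast semantics: for test_shapes[0] =
  // {1, 1, 3, 2}, the output shape is {2, 3, 3, 2} (36 elements) and its
  // first element is input1[0] * input2[0] = -0.3f * 0.2f = -0.06f.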
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    FloatMulOpModel model_fixture(
        {TensorType_FLOAT32, base_shape}, {TensorType_FLOAT32, test_shapes[i]},
        {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    model_fixture.PopulateTensor<float>(
        model_fixture.input1(), {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f, 1.2f,
                                 2.8f, -1.6f, 0.0f, 0.7f, -2.2f});
    model_fixture.PopulateTensor<float>(model_fixture.input2(),
                                        {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f});
    ASSERT_EQ(model_fixture.Invoke(), kTfLiteOk);

    EXPECT_THAT(model_fixture.GetOutput(),
                ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
        << "With shape number " << i;
  }
  // Re-run with exchanged inputs.
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    FloatMulOpModel model_fixture(
        {TensorType_FLOAT32, test_shapes[i]}, {TensorType_FLOAT32, base_shape},
        {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    model_fixture.PopulateTensor<float>(model_fixture.input1(),
                                        {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f});
    model_fixture.PopulateTensor<float>(
        model_fixture.input2(), {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f, 1.2f,
                                 2.8f, -1.6f, 0.0f, 0.7f, -2.2f});
    ASSERT_EQ(model_fixture.Invoke(), kTfLiteOk);
    EXPECT_THAT(model_fixture.GetOutput(),
                ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
        << "With shape number " << i;
  }
}

TEST(FloatMulOpTest, WithBroadcast2Elements) {
  const std::vector<std::vector<int>> test_shapes = {
      {2, 2}, {2, 1, 2}, {1, 2, 2}, {1, 2, 1, 2}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    FloatMulOpModel m({TensorType_FLOAT32, test_shapes[i]},
                      {TensorType_FLOAT32, {2}},  // always a vector of 2
                      {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
    m.PopulateTensor<float>(m.input2(), {0.1, 0.2});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    EXPECT_THAT(m.GetOutput(),
                ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.07, 0.16})))
        << "With shape number " << i;
  }
}

TEST(FloatMulOpTest, ScalarAndOneElement) {
  FloatMulOpModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {}},
                    {TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<float>(m.input1(), {0.8});
  m.PopulateTensor<float>(m.input2(), {0.5});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({0.4})));
}

TEST(IntegerMulOpTest, NoActivation) {
  IntegerMulOpModel m({TensorType_INT32, {1, 2, 2, 1}},
                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
                      ActivationFunctionType_NONE);
  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 4, 21, 40}));
}

TEST(ComplexMulOpTest, BaseTest) {
  ComplexMulOpModel m({TensorType_COMPLEX64, {1, 2, 2, 1}},
                      {TensorType_COMPLEX64, {1, 2, 2, 1}},
                      {TensorType_COMPLEX64, {}}, ActivationFunctionType_NONE);
  m.PopulateTensor<std::complex<float>>(m.input1(), {-20, {2, 3}, {7, 2}, 8});
  m.PopulateTensor<std::complex<float>>(m.input2(), {1, {2, -3}, {3, -4}, 5});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
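  // The expected values follow from complex multiplication, e.g.
  // (2 + 3i)(2 - 3i) = 4 + 9 = 13 and
  // (7 + 2i)(3 - 4i) = (21 + 8) + (6 - 28)i = 29 - 22i.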
  std::complex<float> expected_result[4] = {-20, 13, {29, -22}, 40};
  EXPECT_THAT(m.GetOutput(), ElementsAreArray(expected_result));
}

TEST(ComplexMulOpTest, WithBroadcast) {
  const std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    ComplexMulOpModel m({TensorType_COMPLEX64, test_shapes[i]},
                        {TensorType_COMPLEX64, {}}, {TensorType_COMPLEX64, {}},
                        ActivationFunctionType_NONE);
    m.PopulateTensor<std::complex<float>>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<std::complex<float>>(m.input2(), {1});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 2, 7, 8, 11, 20}))
        << "With shape number " << i;
  }
}

TEST(ComplexMulOpTest, IncompatibleActivation) {
  ComplexMulOpModel m({TensorType_COMPLEX64, {1, 2, 2, 1}},
                      {TensorType_COMPLEX64, {1, 2, 2, 1}},
                      {TensorType_COMPLEX64, {}},
                      ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<std::complex<float>>(m.input1(), {-20, {2, 3}, {7, 2}, 8});
  m.PopulateTensor<std::complex<float>>(m.input2(), {1, {2, -3}, {3, -4}, 5});
  ASSERT_EQ(m.Invoke(), kTfLiteError);
}

TEST(IntegerMulOpTest, ActivationRELU_N1_TO_1) {
  IntegerMulOpModel m({TensorType_INT32, {1, 2, 2, 1}},
                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
                      ActivationFunctionType_RELU_N1_TO_1);
  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
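  // The raw products are {-20, 4, 21, 40}; RELU_N1_TO_1 clamps each result to
  // [-1, 1], so every element saturates to -1 or 1.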
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 1, 1, 1}));
}

TEST(IntegerMulOpTest, VariousInputShapes) {
  const std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    IntegerMulOpModel m({TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 4, 21, 40, 121, 20}))
        << "With shape number " << i;
  }
}

TEST(IntegerMulOpTest, WithBroadcast) {
  const std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    IntegerMulOpModel m({TensorType_INT32, test_shapes[i]},
                        {TensorType_INT32, {}},  // always a scalar
                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
    m.PopulateTensor<int32_t>(m.input2(), {1});
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    // Integer outputs are exact, so no float-near tolerance is needed.
    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 2, 7, 8, 11, 20}))
        << "With shape number " << i;
  }
}

template <TensorType tensor_type, typename integer_dtype>
void NoActivation() {
  QuantizedMulOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                        {tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                        {tensor_type, {}, -1.0, 1.0},
                        ActivationFunctionType_NONE);
  m.QuantizeAndPopulate<integer_dtype>(m.input1(), {-0.8, 0.2, 0.9, 0.7});
  m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.6, 0.4, 0.9, 0.8});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
              ElementsAreArray(ArrayFloatNear({-0.48, 0.08, 0.81, 0.56},
                                              kQuantizedTolerance)));
}

template <TensorType tensor_type, typename integer_dtype>
void NoActivationLargeMultiplier() {
  // Intentionally pathological output range, much narrower than needed to
  // represent the input values, in order to exercise the multiplier > 1 case.
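  // With 8-bit quantization, the input scale here is 200/255 ≈ 0.784 and the
  // output scale is 20/255 ≈ 0.0784, so the effective rescaling multiplier is
  // (0.784 * 0.784) / 0.0784 ≈ 7.84, i.e. greater than 1.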
  QuantizedMulOpModel m({tensor_type, {1, 2, 2, 1}, -100, 100},
                        {tensor_type, {1, 2, 2, 1}, -100, 100},
                        {tensor_type, {}, -10, 10},
                        ActivationFunctionType_NONE);
  m.QuantizeAndPopulate<integer_dtype>(m.input1(), {-4, 2, 3, 1});
  m.QuantizeAndPopulate<integer_dtype>(m.input2(), {-1, -3, 4, 2});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  // Note the large tolerance. This computation is inherently inaccurate.
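  // Roughly: the input step is 200/255 ≈ 0.78, so each dequantized input can
  // be off by up to ~0.39, and the error of a product a*b grows as about
  // |a|*err(b) + |b|*err(a), which motivates a tolerance above 1.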
  const float kTolerance = 1.4f;
  EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
              ElementsAreArray(ArrayFloatNear({4, -6, 10, 2}, kTolerance)));
}

TEST(QuantizedMulOpTest, NoActivationUInt8) {
  NoActivation<TensorType_UINT8, uint8_t>();
  NoActivationLargeMultiplier<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedMulOpTest, NoActivationInt8) {
  NoActivation<TensorType_INT8, int8_t>();
  NoActivationLargeMultiplier<TensorType_INT8, int8_t>();
}

TEST(QuantizedMulOpTest, NoActivationInt16) {
  const float kMin = -1.f;
  const float kMax = 32767.f / 32768.f;
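  // With the range [-1, 32767/32768], the derived scale is exactly 1/32768
  // and the zero point is 0, i.e. symmetric int16 quantization.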
  QuantizedMulOpModel m({TensorType_INT16, {1, 2, 2, 1}, kMin, kMax},
                        {TensorType_INT16, {1, 2, 2, 1}, kMin, kMax},
                        {TensorType_INT16, {}, kMin, kMax},
                        ActivationFunctionType_NONE);
  m.QuantizeAndPopulate<int16_t>(m.input1(), {-0.8, 0.2, 0.9, 0.7});
  m.QuantizeAndPopulate<int16_t>(m.input2(), {0.6, 0.4, 0.9, 0.8});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetDequantizedOutputInt16(),
              ElementsAreArray(ArrayFloatNear({-0.48, 0.08, 0.81, 0.56},
                                              kQuantizedToleranceInt16)));
}

TEST(QuantizedMulOpTest, NoActivationInt16Scaled) {
  const float kMin = -2.f;
  const float kMax = 2.f * 32767.f / 32768.f;
  QuantizedMulOpModel m({TensorType_INT16, {1, 2, 3, 1}, kMin, kMax},
                        {TensorType_INT16, {1, 2, 3, 1}, 2 * kMin, 2 * kMax},
                        {TensorType_INT16, {}, 8 * kMin, 8 * kMax},
                        ActivationFunctionType_NONE);
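  // These three ranges give scales of 1/16384, 1/8192, and 1/2048, so the
  // output rescaling multiplier is (1/16384 * 1/8192) / (1/2048) = 1/65536.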
  m.QuantizeAndPopulate<int16_t>(m.input1(), {-1.8, 0.2, 0.9, 1.7, 0.1, -1.95});
  m.QuantizeAndPopulate<int16_t>(m.input2(),
                                 {3.6, -3.4, 3.9, 0.8, -1.0, -3.95});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);

  const float kQuantizedToleranceInt16Scaled =
      6.0 * kQuantizedStepInt16 + kQuantizedStepInt16 * kQuantizedStepInt16;

  EXPECT_THAT(
      m.GetDequantizedOutputInt16(),
      ElementsAreArray(ArrayFloatNear({-6.48, -0.68, 3.51, 1.36, -0.1, 7.7025},
                                      kQuantizedToleranceInt16Scaled)));
}

template <TensorType tensor_type, typename integer_dtype>
void NoActivationInt16With8BitOutput() {
  const float kMinInt16 = -1.f;
  const float kMaxInt16 = 32767.f / 32768.f;
  const float kMinUint8 = -1.f;
  const float kMaxUint8 = 127.f / 128.f;
  QuantizedMulOpModel m({TensorType_INT16, {1, 2, 2, 1}, kMinInt16, kMaxInt16},
                        {TensorType_INT16, {1, 2, 2, 1}, kMinInt16, kMaxInt16},
                        {tensor_type, {}, kMinUint8, kMaxUint8},
                        ActivationFunctionType_NONE);
  m.QuantizeAndPopulate<int16_t>(m.input1(), {-0.8, 0.2, 0.9, 0.7});
  m.QuantizeAndPopulate<int16_t>(m.input2(), {0.6, 0.4, 0.9, 0.8});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
              ElementsAreArray(ArrayFloatNear({-0.48, 0.08, 0.81, 0.56},
                                              kQuantizedTolerance)));
}

TEST(QuantizedMulOpTest, NoActivationInt16WithUint8Output) {
  NoActivationInt16With8BitOutput<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedMulOpTest, NoActivationInt16WithInt8Output) {
  NoActivationInt16With8BitOutput<TensorType_INT8, int8_t>();
}

// For quantized Mul, the error shouldn't exceed 2*step.
float GetTolerance(float min, float max) {
  const float step = (max - min) / 255.0f;
  return 2.0f * step;
}
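// For example, the broadcast tests below call GetTolerance(-3.0, 3.0), giving
// a step of 6/255 ≈ 0.0235 and a tolerance of ≈ 0.047.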

template <TensorType tensor_type, typename integer_dtype>
void WithBroadcast() {
  const float kQuantizedTolerance = GetTolerance(-3.0, 3.0);
  const std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  // Test with quantization multipliers both smaller and greater than 1.
  const std::vector<std::pair<float, float>> test_input_range = {{-3.0, 3.0},
                                                                 {-6.0, 6.0}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    for (size_t j = 0; j < test_input_range.size(); ++j) {
      const std::pair<float, float>& input_range = test_input_range[j];
      QuantizedMulOpModel m(
          {tensor_type, test_shapes[i], input_range.first, input_range.second},
          {tensor_type, {}, input_range.first, input_range.second},
          {tensor_type, {}, -0.2, 0.2}, ActivationFunctionType_NONE);
      m.QuantizeAndPopulate<integer_dtype>(m.input1(),
                                           {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
      m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.1});
      ASSERT_EQ(m.Invoke(), kTfLiteOk);
      EXPECT_THAT(
          m.GetDequantizedOutput<integer_dtype>(),
          ElementsAreArray(ArrayFloatNear({-0.2, 0.02, 0.07, 0.08, 0.11, 0.2},
                                          kQuantizedTolerance)))
          << "With shape number " << i << " and range number " << j;
    }
  }
}

template <TensorType tensor_type, typename integer_dtype>
void QuantizedWithMixedBroadcast() {
  const float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
  const std::vector<int> base_shape = {2, 3, 1, 2};
  const std::vector<std::vector<int>> test_shapes = {
      {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
  const std::vector<std::vector<float>> test_outputs = {
      {-0.06f, 0.69f,  0.12f,  1.15f, -0.30f, 2.07f,  0.18f,  0.15f, -0.36f,
       0.25f,  0.90f,  0.45f,  0.16f, -0.33f, -0.32f, -0.55f, 0.80f, -0.99f,
       0.24f,  0.84f,  -0.48f, 1.40f, 1.20f,  2.52f,  -0.32f, 0.00f, 0.64f,
       0.00f,  -1.60f, 0.00f,  0.14f, -0.66f, -0.28f, -1.10f, 0.70f, -1.98f},
      {-0.06f, 0.69f, -0.36f, 0.25f, 0.80f, -0.99f, 0.24f, 0.84f, 0.64f, 0.00f,
       0.70f, -1.98f},
      {-0.06f, 0.46f,  -0.09f, 0.69f, 0.12f,  -0.92f, 0.18f,  0.10f,  0.27f,
       0.15f,  -0.36f, -0.20f, 0.16f, -0.22f, 0.24f,  -0.33f, -0.32f, 0.44f,
       0.60f,  1.40f,  1.20f,  2.80f, 1.08f,  2.52f,  -0.80f, 0.00f,  -1.60f,
       0.00f,  -1.44f, 0.00f,  0.35f, -1.10f, 0.70f,  -2.20f, 0.63f,  -1.98f},
      {-0.06f, 0.46f, 0.27f, 0.15f, -0.32f, 0.44f, 0.60f, 1.40f, -1.60f, 0.00f,
       0.63f, -1.98f}};
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    QuantizedMulOpModel model_fixture({tensor_type, base_shape, -3.f, 3.f},
                                      {tensor_type, test_shapes[i], -3.f, 3.f},
                                      {tensor_type, {}, -3.f, 3.f},
                                      ActivationFunctionType_NONE);
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input1(), {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f, 1.2f,
                                 2.8f, -1.6f, 0.0f, 0.7f, -2.2f});
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input2(), {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f});
    ASSERT_EQ(model_fixture.Invoke(), kTfLiteOk);
    EXPECT_THAT(
        model_fixture.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(test_outputs[i], kQuantizedTolerance)))
        << "With shape number " << i;
  }
  // Re-run with exchanged inputs.
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    QuantizedMulOpModel model_fixture({tensor_type, test_shapes[i], -3.f, 3.f},
                                      {tensor_type, base_shape, -3.f, 3.f},
                                      {tensor_type, {}, -3.f, 3.f},
                                      ActivationFunctionType_NONE);
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input1(), {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f});
    model_fixture.QuantizeAndPopulate<integer_dtype>(
        model_fixture.input2(), {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f, 1.2f,
                                 2.8f, -1.6f, 0.0f, 0.7f, -2.2f});
    ASSERT_EQ(model_fixture.Invoke(), kTfLiteOk);
    EXPECT_THAT(
        model_fixture.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(test_outputs[i], kQuantizedTolerance)))
        << "With shape number " << i;
  }
}

TEST(QuantizedMulOpTest, WithBroadcastUInt8) {
  WithBroadcast<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedMulOpTest, WithBroadcastInt8) {
  WithBroadcast<TensorType_INT8, int8_t>();
}

TEST(QuantizedMulOpTest, QuantizedWithMixedBroadcastUInt8) {
  QuantizedWithMixedBroadcast<TensorType_UINT8, uint8_t>();
}

TEST(QuantizedMulOpTest, QuantizedWithMixedBroadcastInt8) {
  QuantizedWithMixedBroadcast<TensorType_INT8, int8_t>();
}

}  // namespace
}  // namespace tflite