//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizationTestHelper.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <schema_generated.h>

#include <doctest/doctest.h>

namespace armnnDelegate
{

18 // Dequantize operator test functions.
DequantizeUint8Test(std::vector<armnn::BackendId> & backends)19 void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
20 {
21 std::vector<int32_t> inputShape { 2, 4 };
22 std::vector<int32_t> outputShape { 2, 4 };
23
24 // Set input and output data
25 std::vector<uint8_t> inputValues
26 {
27 0, 1, 2, 3, // Lower bounds
28 252, 253, 254, 255 // Upper bounds
29 };
30 std::vector<float> expectedOutputValues
31 {
32 0.f, 1.f, 2.f, 3.f,
33 252.f, 253.f, 254.f, 255.f
34 };
35
36 QuantizationTest<uint8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
37 ::tflite::TensorType_UINT8,
38 ::tflite::TensorType_FLOAT32,
39 backends,
40 inputShape,
41 outputShape,
42 inputValues,
43 expectedOutputValues);
44 }
45
DequantizeInt8Test(std::vector<armnn::BackendId> & backends)46 void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
47 {
48 std::vector<int32_t> inputShape { 2, 4 };
49 std::vector<int32_t> outputShape { 2, 4 };
50
51 std::vector<int8_t> inputValues
52 {
53 -1, 0, 1, 2,
54 -128, -127, 126, 127
55 };
56 std::vector<float> expectedOutputValues
57 {
58 -1.f, 0.f, 1.f, 2.f,
59 -128.f, -127.f, 126.f, 127.f
60 };
61
62 QuantizationTest<int8_t , float>(tflite::BuiltinOperator_DEQUANTIZE,
63 ::tflite::TensorType_INT8,
64 ::tflite::TensorType_FLOAT32,
65 backends,
66 inputShape,
67 outputShape,
68 inputValues,
69 expectedOutputValues);
70 }
71
DequantizeInt16Test(std::vector<armnn::BackendId> & backends)72 void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
73 {
74 std::vector<int32_t> inputShape { 2, 5 };
75 std::vector<int32_t> outputShape { 2, 5 };
76
77 std::vector<int16_t> inputValues
78 {
79 -1, 0, 1, 2,
80 -32768, -16384, 16384, 32767
81 };
82 std::vector<float> expectedOutputValues
83 {
84 -1.f, 0.f, 1.f, 2.f,
85 -32768.f, -16384.f, 16384.f, 32767.f
86 };
87
88 QuantizationTest<int16_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
89 ::tflite::TensorType_INT16,
90 ::tflite::TensorType_FLOAT32,
91 backends,
92 inputShape,
93 outputShape,
94 inputValues,
95 expectedOutputValues);
96 }
97
98 // Quantize operator test functions.
QuantizeFloat32Uint8Test(std::vector<armnn::BackendId> & backends)99 void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
100 {
101 std::vector<int32_t> inputShape { 2, 4 };
102 std::vector<int32_t> outputShape { 2, 4 };
103
104 // Set input and output data
105 std::vector<float> inputValues
106 {
107 -1.f, 0.f, 1.f, 2.f, // Lower bounds
108 252.f, 253.f, 255.f, 256.f // Upper bounds
109 };
110 std::vector<uint8_t> expectedOutputValues
111 {
112 0, 0, 1, 2,
113 252, 253, 255, 255
114 };
115
116 QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
117 ::tflite::TensorType_FLOAT32,
118 ::tflite::TensorType_UINT8,
119 backends,
120 inputShape,
121 outputShape,
122 inputValues,
123 expectedOutputValues);
124 }
125
QuantizeFloat32Int8Test(std::vector<armnn::BackendId> & backends)126 void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
127 {
128 std::vector<int32_t> inputShape { 2, 4 };
129 std::vector<int32_t> outputShape { 2, 4 };
130
131 std::vector<float> inputValues
132 {
133 -1.f, 0.f, 1.f, 2.f,
134 -128.5f, -127.f, 126.f, 127.5f
135 };
136 std::vector<int8_t> expectedOutputValues
137 {
138 -1, 0, 1, 2,
139 -128, -127, 126, 127
140 };
141
142 QuantizationTest<float, int8_t>(tflite::BuiltinOperator_QUANTIZE,
143 ::tflite::TensorType_FLOAT32,
144 ::tflite::TensorType_INT8,
145 backends,
146 inputShape,
147 outputShape,
148 inputValues,
149 expectedOutputValues);
150 }
151
QuantizeFloat32Int16Test(std::vector<armnn::BackendId> & backends)152 void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
153 {
154 std::vector<int32_t> inputShape { 2, 4 };
155 std::vector<int32_t> outputShape { 2, 4 };
156
157 std::vector<float> inputValues
158 {
159 -1.f, 0.f, 1.f, 2.f,
160 -32768.5f, -16384.f, 16384.f, 32767.5f
161 };
162 std::vector<int16_t> expectedOutputValues
163 {
164 -1, 0, 1, 2,
165 -32768, -16384, 16384, 32767
166 };
167
168 QuantizationTest<float, int16_t>(tflite::BuiltinOperator_QUANTIZE,
169 ::tflite::TensorType_FLOAT32,
170 ::tflite::TensorType_INT16,
171 backends,
172 inputShape,
173 outputShape,
174 inputValues,
175 expectedOutputValues);
176 }
177
QuantizeInt16Int16Test(std::vector<armnn::BackendId> & backends)178 void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
179 {
180 std::vector<int32_t> inputShape { 2, 4 };
181 std::vector<int32_t> outputShape { 2, 4 };
182
183 std::vector<int16_t> inputValues
184 {
185 -1, 0, 1, 2,
186 -32768, -16384, 16384, 32767
187 };
188 std::vector<int16_t> expectedOutputValues
189 {
190 -1, 0, 1, 2,
191 -32768, -16384, 16384, 32767
192 };
193
194 QuantizationTest<int16_t, int16_t>(tflite::BuiltinOperator_QUANTIZE,
195 ::tflite::TensorType_INT16,
196 ::tflite::TensorType_INT16,
197 backends,
198 inputShape,
199 outputShape,
200 inputValues,
201 expectedOutputValues);
202 }
203
QuantizeInt16Int8Test(std::vector<armnn::BackendId> & backends)204 void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
205 {
206 std::vector<int32_t> inputShape { 2, 4 };
207 std::vector<int32_t> outputShape { 2, 4 };
208
209 std::vector<int16_t> inputValues
210 {
211 -1, 0, 1, 2,
212 -32768, -16384, 16384, 32767
213 };
214 std::vector<int8_t> expectedOutputValues
215 {
216 -1, 0, 1, 2,
217 -128, -128, 127, 127
218 };
219
220 QuantizationTest<int16_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
221 ::tflite::TensorType_INT16,
222 ::tflite::TensorType_INT8,
223 backends,
224 inputShape,
225 outputShape,
226 inputValues,
227 expectedOutputValues);
228 }
229
QuantizeInt8Uint8Test(std::vector<armnn::BackendId> & backends)230 void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
231 {
232 std::vector<int32_t> inputShape { 2, 4 };
233 std::vector<int32_t> outputShape { 2, 4 };
234
235 std::vector<int8_t> inputValues
236 {
237 -1, 0, 1, 2,
238 -128, -127, 126, 127
239 };
240 std::vector<uint8_t> expectedOutputValues
241 {
242 0, 0, 1, 2,
243 0, 0, 126, 127
244 };
245
246 QuantizationTest<int8_t, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
247 ::tflite::TensorType_INT8,
248 ::tflite::TensorType_UINT8,
249 backends,
250 inputShape,
251 outputShape,
252 inputValues,
253 expectedOutputValues);
254 }
255
QuantizeUint8Int8Test(std::vector<armnn::BackendId> & backends)256 void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
257 {
258 std::vector<int32_t> inputShape { 2, 4 };
259 std::vector<int32_t> outputShape { 2, 4 };
260
261 std::vector<uint8_t> inputValues
262 {
263 0, 1, 2, 3,
264 126, 127, 254, 255
265 };
266 std::vector<int8_t> expectedOutputValues
267 {
268 0, 1, 2, 3,
269 126, 127, 127, 127
270 };
271
272 QuantizationTest<uint8_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
273 ::tflite::TensorType_UINT8,
274 ::tflite::TensorType_INT8,
275 backends,
276 inputShape,
277 outputShape,
278 inputValues,
279 expectedOutputValues);
280 }
281
282 TEST_SUITE("CpuRef_QuantizationTests")
283 {
284
285 TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
286 {
287 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
288 DequantizeUint8Test(backends);
289 }
290
291
292 TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
293 {
294 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
295 DequantizeInt8Test(backends);
296 }
297
298
299 TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
300 {
301 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
302 DequantizeInt16Test(backends);
303 }
304
305
306 TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
307 {
308 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
309 QuantizeFloat32Uint8Test(backends);
310 }
311
312
313 TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
314 {
315 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
316 QuantizeFloat32Int8Test(backends);
317 }
318
319
320 TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
321 {
322 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
323 QuantizeFloat32Int16Test(backends);
324 }
325
326
327 TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
328 {
329 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
330 QuantizeInt16Int16Test(backends);
331 }
332
333
334 TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
335 {
336 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
337 QuantizeInt16Int8Test(backends);
338 }
339
340
341
342 TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
343 {
344 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
345 QuantizeInt8Uint8Test(backends);
346 }
347
348
349 TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
350 {
351 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
352 QuantizeUint8Int8Test(backends);
353 }
354
355 }
356
357 TEST_SUITE("CpuAcc_QuantizationTests")
358 {
359
360 // Dequantize Operator Tests
361 TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
362 {
363 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
364 DequantizeUint8Test(backends);
365 }
366
367 TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
368 {
369 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
370 DequantizeInt8Test(backends);
371 }
372
373 TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
374 {
375 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
376 DequantizeInt16Test(backends);
377 }
378
379 // Quantize Operator Tests
380 TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
381 {
382 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
383 QuantizeFloat32Uint8Test(backends);
384 }
385
386 TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
387 {
388 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
389 QuantizeFloat32Int8Test(backends);
390 }
391
392 TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
393 {
394 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
395 QuantizeInt8Uint8Test(backends);
396 }
397
398 TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
399 {
400 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
401 QuantizeUint8Int8Test(backends);
402 }
403
404 }
405
406 TEST_SUITE("GpuAcc_QuantizationTests")
407 {
408
409 // Dequantize Operator Tests
410 TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
411 {
412 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
413 DequantizeUint8Test(backends);
414 }
415
416 TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
417 {
418 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
419 DequantizeInt8Test(backends);
420 }
421
422 TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
423 {
424 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
425 DequantizeInt16Test(backends);
426 }
427
428 // Quantize Operator Tests
429 TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
430 {
431 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
432 QuantizeFloat32Uint8Test(backends);
433 }
434
435 TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
436 {
437 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
438 QuantizeFloat32Int8Test(backends);
439 }
440
441 TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
442 {
443 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
444 QuantizeInt8Uint8Test(backends);
445 }
446
447 TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
448 {
449 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
450 QuantizeUint8Int8Test(backends);
451 }
452
453 }

} // namespace armnnDelegate