//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ParserFlatbuffersFixture.hpp"


TEST_SUITE("TensorflowLiteParser_Quantize")
{
struct QuantizeFixture : public ParserFlatbuffersFixture
{
    explicit QuantizeFixture(const std::string& inputShape,
                             const std::string& outputShape,
                             const std::string& dataType,
                             const std::string& zeroPoint = "[ 0 ]")
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "QUANTIZE" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "FLOAT32",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": )" + zeroPoint + R"(,
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": )" + dataType + R"(,
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.5 ],
                                "zero_point": )" + zeroPoint + R"(,
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "QuantizeOptions",
                            "builtin_options": {
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

struct SimpleQuantizeFixtureQAsymm8 : QuantizeFixture
{
    SimpleQuantizeFixtureQAsymm8() : QuantizeFixture("[ 1, 6 ]",
                                                     "[ 1, 6 ]",
                                                     "UINT8") {}
};

TEST_CASE_FIXTURE(SimpleQuantizeFixtureQAsymm8, "SimpleQuantizeFixtureQAsymm8")
{
    RunTest<2, armnn::DataType::Float32, armnn::DataType::QAsymmU8>(
        0,
        {{"inputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}},
        {{"outputTensor", { 0u, 1u, 5u, 100u, 200u, 255u }}});
}

struct SimpleQuantizeFixtureQSymm16 : QuantizeFixture
{
    SimpleQuantizeFixtureQSymm16() : QuantizeFixture("[ 1, 6 ]",
                                                     "[ 1, 6 ]",
                                                     "INT16") {}
};

TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymm16, "SimpleQuantizeQsymm16")
{
    RunTest<2, armnn::DataType::Float32, armnn::DataType::QSymmS16>(
        0,
        {{"inputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f, -49152.0f }}},
        {{"outputTensor", { 0, 1, 5, 32767, -1, -32768 }}});
}

struct SimpleQuantizeFixtureQSymmS8 : QuantizeFixture
{
    SimpleQuantizeFixtureQSymmS8() : QuantizeFixture("[ 1, 6 ]",
                                                     "[ 1, 6 ]",
                                                     "INT8",
                                                     "[]") {}
};

TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymmS8, "SimpleQuantizeQSymmS8")
{
    RunTest<2, armnn::DataType::Float32, armnn::DataType::QSymmS8>(
        0,
        {{"inputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}},
        {{"outputTensor", { 0, 1, 5, 127, -128, -1 }}});
}

}
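
// ---------------------------------------------------------------------------
// Illustrative sketch only; this block is not part of the original Arm NN test
// file. The expected outputs above follow the usual affine quantization rule
//     q = clamp(round(x / scale) + zeroPoint, qMin, qMax)
// with the outputTensor scale of 1.5 and a zero point of 0, for example:
//     QAsymmU8 : 150.0f   / 1.5 = 100,    382.5f   / 1.5 =  255 (UINT8 maximum)
//     QSymmS16 : 49150.5f / 1.5 = 32767, -49152.0f / 1.5 = -32768
//     QSymmS8  : 190.5f   / 1.5 = 127,   -192.0f   / 1.5 = -128
// The helper below is a hypothetical reference implementation of that rule,
// added purely for illustration (assumes C++17); it is not used by the tests.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

namespace
{
template <typename QuantizedType>
[[maybe_unused]] QuantizedType QuantizeReference(float value, float scale, int32_t zeroPoint)
{
    // Round to the nearest integer in the quantized domain, then saturate to
    // the representable range of the target integer type.
    const int64_t quantized = static_cast<int64_t>(std::lround(value / scale)) + zeroPoint;
    const int64_t qMin = std::numeric_limits<QuantizedType>::min();
    const int64_t qMax = std::numeric_limits<QuantizedType>::max();
    return static_cast<QuantizedType>(std::min(std::max(quantized, qMin), qMax));
}
} // anonymous namespace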