/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Operations"

#include "Quantize.h"

#include <algorithm>
#include <cmath>

#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "OperationsExecutionUtils.h"
#include "Tracing.h"

namespace android {
namespace nn {
namespace quantize {
namespace {

// The quantization formula also appears in Elementwise.cpp.
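// Computes q = clamp(round(x / scale) + zeroPoint, 0, 255) for each element,
// using the scale and zero point (offset) of the output operand.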
template <typename T>
bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {
    NNTRACE_COMP("quantizeToQuant8");
    uint32_t size = getNumberOfElements(outputShape);
    for (uint32_t i = 0; i < size; ++i) {
        outputData[i] = static_cast<uint8_t>(std::max<float>(
                0.0f, std::min<float>(255.0f, outputShape.offset + std::round(inputData[i] /
                                                                              outputShape.scale))));
    }
    return true;
}

// The quantization formula also appears in Elementwise.cpp.
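// Same as quantizeToQuant8 above, but clamps to the signed 8-bit range
// [-128, 127] instead of [0, 255].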
template <typename T>
bool quantizeToQuant8Signed(const T* inputData, int8_t* outputData, const Shape& outputShape) {
    NNTRACE_COMP("quantizeToQuant8Signed");
    uint32_t size = getNumberOfElements(outputShape);
    for (uint32_t i = 0; i < size; ++i) {
        outputData[i] = static_cast<int8_t>(std::max<float>(
                -128.0f,
                std::min<float>(127.0f, outputShape.offset +
                                                std::round(inputData[i] / outputShape.scale))));
    }
    return true;
}

}  // namespace

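// The output tensor keeps its own quantization parameters (scale and zero
// point); only the dimensions are copied from the input.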
bool prepare(IOperationExecutionContext* context) {
    const Shape& input = context->getInputShape(kInputTensor);
    Shape output = context->getOutputShape(kOutputTensor);
    output.dimensions = input.dimensions;
    return context->setOutputShape(kOutputTensor, output);
}

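// Dispatches on the operand types: FLOAT32 or FLOAT16 input, quantized to
// either TENSOR_QUANT8_ASYMM (uint8) or TENSOR_QUANT8_ASYMM_SIGNED (int8).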
bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;

    const OperandType inputType = context->getInputType(kInputTensor);
    const OperandType outputType = context->getOutputType(kOutputTensor);
    if (inputType == OperandType::TENSOR_FLOAT32) {
        if (outputType == OperandType::TENSOR_QUANT8_ASYMM) {
            return quantizeToQuant8<float>(context->getInputBuffer<float>(kInputTensor),
                                           context->getOutputBuffer<uint8_t>(kOutputTensor),
                                           context->getOutputShape(kOutputTensor));
        } else if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            return quantizeToQuant8Signed<float>(context->getInputBuffer<float>(kInputTensor),
                                                 context->getOutputBuffer<int8_t>(kOutputTensor),
                                                 context->getOutputShape(kOutputTensor));
        }
    } else if (inputType == OperandType::TENSOR_FLOAT16) {
        if (outputType == OperandType::TENSOR_QUANT8_ASYMM) {
            return quantizeToQuant8<_Float16>(context->getInputBuffer<_Float16>(kInputTensor),
                                              context->getOutputBuffer<uint8_t>(kOutputTensor),
                                              context->getOutputShape(kOutputTensor));
        } else if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            return quantizeToQuant8Signed<_Float16>(context->getInputBuffer<_Float16>(kInputTensor),
                                                    context->getOutputBuffer<int8_t>(kOutputTensor),
                                                    context->getOutputShape(kOutputTensor));
        }
    }
    NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. (input type: "
                        << inputType << " output type: " << outputType << ")";
}

}  // namespace quantize

NN_REGISTER_OPERATION_DEFAULT_VALIDATION(QUANTIZE, quantize::prepare, quantize::execute,
                                         .allowZeroSizedInput = true);

}  // namespace nn
}  // namespace android