/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32_SCALE_KERNEL_H
#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32_SCALE_KERNEL_H

#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/common/Macros.h"
#include "src/cpu/ICpuKernel.h"

namespace arm_compute
{
// Forward declarations
class ITensor;
namespace cpu
{
namespace kernels
{
/** Kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
 *
 * This kernel takes a final int32 accumulator value (the output of @ref CpuGemmLowpMatrixMultiplyKernel) and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 * The following computations are performed by the kernel (a scalar sketch follows the list):
 *
 * -# Add the bias to the accumulator if the bias tensor is not a nullptr
 * -# Add the offset term to the result
 * -# Multiply each entry of the result by result_mult_int
 * -# Shift the int32 accumulator by result_shift
 * -# Clamp the value between the specified min and max bounds
 * -# Saturate the resulting int32 values:
 *    - to the [0..255] range and cast to QASYMM8, or
 *    - to the [-128..127] range and cast to QASYMM8_SIGNED.
 *
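 * As a rough scalar sketch of the per-element arithmetic (illustrative only:
 * the kernel dispatches to vectorised specialisations selected at configure
 * time, and the local names below are hypothetical; the fields come from
 * @ref GEMMLowpOutputStageInfo):
 *
 * @code
 * int32_t acc = in;                          // int32 GEMMLowp accumulator
 * if(has_bias)
 * {
 *     acc += bias;                           // optional shared bias
 * }
 * acc += output_stage.gemmlowp_offset;       // add the offset term
 * acc *= output_stage.gemmlowp_multiplier;   // multiply by result_mult_int
 * acc >>= output_stage.gemmlowp_shift;       // shift by result_shift
 * if(is_bounded_relu)
 * {
 *     acc = std::max(output_stage.gemmlowp_min_bound,
 *                    std::min(output_stage.gemmlowp_max_bound, acc));
 * }
 * // Saturate to [0..255] (QASYMM8) or [-128..127] (QASYMM8_SIGNED) and cast:
 * const auto out = static_cast<uint8_t>(std::max(0, std::min(255, acc)));
 * @endcode
 *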
 */
class CpuGemmLowpQuantizeDownInt32ScaleKernel : public ICpuKernel<CpuGemmLowpQuantizeDownInt32ScaleKernel>
{
public:
    CpuGemmLowpQuantizeDownInt32ScaleKernel() = default;
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ScaleKernel);
    /** Initialise the kernel's input and output.
     *
     * @param[in]  src          Input tensor info. Data type supported: S32
     * @param[in]  bias         Biases tensor info. Only shared biases are supported; it can be nullptr if bias addition is not required.
     *                          Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p src.
     * @param[out] dst          Output tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  output_stage GEMMLowp output stage metadata.
     */
    void configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage);
    /** Static function to check if the given info will lead to a valid configuration
     *
     * Similar to CpuGemmLowpQuantizeDownInt32ScaleKernel::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage);

    // Inherited methods overridden:
    void        run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
    const char *name() const override;

private:
    /** Template function to run the CpuGemmLowpQuantizeDownInt32ScaleKernel
     *
     * @param[in]  src    Input tensor
     * @param[in]  bias   Bias tensor
     * @param[out] dst    Output tensor
     * @param[in]  window Region on which to execute the kernel (must be a valid region of the window returned by window())
     */
    template <typename T>
    void run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);

    /** Common signature for all the specialised CpuGemmLowpQuantizeDownInt32ScaleKernel functions
     *
     * @param[in]  src    Input tensor
     * @param[in]  bias   Bias tensor
     * @param[out] dst    Output tensor
     * @param[in]  window Region on which to execute the kernel.
     */
    using QuantizeDownFunctionPtr = void (CpuGemmLowpQuantizeDownInt32ScaleKernel::*)(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);

    QuantizeDownFunctionPtr        _func{ nullptr };
    const GEMMLowpOutputStageInfo *_output_stage{ nullptr };
    bool                           _is_bounded_relu{ false };
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWN_INT32_SCALE_KERNEL_H */
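
/* A hedged usage sketch (illustrative only, not part of the library's public
 * API surface): how a caller might validate and configure this kernel for an
 * S32 -> QASYMM8 requantisation. Shapes and output-stage values are made up.
 *
 *   using namespace arm_compute;
 *
 *   TensorInfo src(TensorShape(64U, 32U), 1, DataType::S32);
 *   TensorInfo dst(TensorShape(64U, 32U), 1, DataType::QASYMM8);
 *
 *   GEMMLowpOutputStageInfo stage{};
 *   stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN;
 *   stage.gemmlowp_offset     = 2;   // offset term added to the accumulator
 *   stage.gemmlowp_multiplier = 3;   // result_mult_int
 *   stage.gemmlowp_shift      = 4;   // result_shift
 *   stage.output_data_type    = DataType::QASYMM8;
 *
 *   cpu::kernels::CpuGemmLowpQuantizeDownInt32ScaleKernel k;
 *   const Status st = cpu::kernels::CpuGemmLowpQuantizeDownInt32ScaleKernel::validate(&src, nullptr, &dst, &stage);
 *   if(st.error_code() == ErrorCode::OK)
 *   {
 *       k.configure(&src, nullptr, &dst, &stage);
 *       // At run time the backing tensors are bound through an ITensorPack
 *       // and the kernel is executed via run_op().
 *   }
 */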