/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CL_GEMMLOWP_REDUCTION_KERNEL_H
#define ARM_COMPUTE_CL_GEMMLOWP_REDUCTION_KERNEL_H

#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/common/Macros.h"
#include "src/gpu/cl/ClCompileContext.h"
#include "src/gpu/cl/IClKernel.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
/** Common interface for all OpenCL GEMMLowp reduction kernels */
class IClGemmLowpReductionKernel : public IClKernel
{
public:
    IClGemmLowpReductionKernel();
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(IClGemmLowpReductionKernel);
    /** Initialise the kernel's input and output.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
     * @param[out] output          Output row-vector of sums of all the entries in each row/col of the input tensor. Data type supported: S32
     * @param[in]  info            Kernel metadata:
     *                             - k             Number of matrix columns/rows, depending on the type of reduction.
     *                             - is_reshaped   True if the matrix has been reshaped.
     *                             - scalar        Scalar value to multiply each reduced column/row by.
     *                             - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
     */
    virtual void configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output, const GEMMLowpReductionKernelInfo &info) = 0;
};
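
/* Illustrative sketch (not part of the library): how the GEMMLowpReductionKernelInfo metadata
 * described above might be filled in for a row-sum reduction of a hypothetical, non-reshaped
 * 64 x 256 QASYMM8 matrix A. The concrete values, and the b_offset placeholder, are assumptions
 * made for this example only; callers derive them from their own GEMMLowp configuration.
 *
 *   GEMMLowpReductionKernelInfo info{};
 *   info.k             = 256;        // reduce along K, i.e. the number of columns of A
 *   info.is_reshaped   = false;      // A is still in its original, non-reshaped layout
 *   info.scalar        = -b_offset;  // e.g. pre-scale each row sum (b_offset is a placeholder)
 *   info.mul_by_scalar = true;       // apply the scalar to every reduced row
 */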

/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
 *
 * @note This stage is needed to handle the offset contribution of the matrix product
 *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
 */
class ClGemmLowpMatrixAReductionKernel : public IClGemmLowpReductionKernel
{
public:
    /** Initialise the kernel's input and output.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  mtx_a           Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8.
     * @param[out] vector_sum_row  Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
     * @param[in]  info            Kernel metadata:
     *                             - k             Number of matrix columns/rows, depending on the type of reduction.
     *                             - is_reshaped   True if the matrix has been reshaped.
     *                             - scalar        Scalar value to multiply each reduced column/row by.
     *                             - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
     */
    void configure(const CLCompileContext &compile_context, const ITensorInfo *mtx_a, ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override;
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref ClGemmLowpMatrixAReductionKernel::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info);

    // Inherited methods overridden:
    void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
};
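
/* Illustrative sketch (not taken from the library's tests): validating and then configuring the
 * Matrix A reduction for a hypothetical 64 x 256 QASYMM8 input, producing one S32 sum per row.
 * The shapes, the compile-context retrieval via CLKernelLibrary and the stack-allocated kernel
 * are example choices; a real translation unit would also include TensorInfo.h, Error.h and
 * CLKernelLibrary.h.
 *
 *   TensorInfo mtx_a_info(TensorShape(256U, 64U), 1, DataType::QASYMM8); // K = 256 columns, M = 64 rows
 *   TensorInfo sum_row_info(TensorShape(64U), 1, DataType::S32);         // one S32 sum per row of A
 *
 *   GEMMLowpReductionKernelInfo info{};
 *   info.k = 256;
 *
 *   ClGemmLowpMatrixAReductionKernel kernel;
 *   const Status status = ClGemmLowpMatrixAReductionKernel::validate(&mtx_a_info, &sum_row_info, info);
 *   if(status.error_code() == ErrorCode::OK)
 *   {
 *       kernel.configure(CLKernelLibrary::get().get_compile_context(), &mtx_a_info, &sum_row_info, info);
 *   }
 */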

/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
 *
 * @note This stage is needed to handle the offset contribution of the matrix product
 *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
 */
class ClGemmLowpMatrixBReductionKernel : public IClGemmLowpReductionKernel
{
public:
    /** Initialise the kernel's input and output.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  mtx_b           Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
     * @param[out] vector_sum_col  Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
     * @param[in]  info            Kernel metadata:
     *                             - k             Number of matrix columns/rows, depending on the type of reduction.
     *                             - is_reshaped   True if the matrix has been reshaped.
     *                             - scalar        Scalar value to multiply each reduced column/row by.
     *                             - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
     */
    void configure(const CLCompileContext &compile_context, const ITensorInfo *mtx_b, ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override;
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref ClGemmLowpMatrixBReductionKernel::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info);

    // Inherited methods overridden:
    void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
};
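
/* Illustrative sketch (not taken from the library's tests): configuring the Matrix B reduction for
 * a hypothetical 256 x 128 QASYMM8 matrix B (one S32 sum per column) and dispatching it. This
 * assumes run_op() reads its input and output from the ACL_SRC and ACL_DST pack slots, and that
 * mtx_b and vector_sum_col are already-allocated ICLTensor objects; normally the CLScheduler's
 * enqueue_op() drives the run_op() call from inside the owning GEMMLowp operator.
 *
 *   TensorInfo mtx_b_info(TensorShape(128U, 256U), 1, DataType::QASYMM8); // N = 128 columns, K = 256 rows
 *   TensorInfo sum_col_info(TensorShape(128U), 1, DataType::S32);         // one S32 sum per column of B
 *
 *   GEMMLowpReductionKernelInfo info{};
 *   info.k = 256;                                                         // reduce along K, the number of rows of B
 *
 *   ClGemmLowpMatrixBReductionKernel kernel;
 *   kernel.configure(CLKernelLibrary::get().get_compile_context(), &mtx_b_info, &sum_col_info, info);
 *
 *   ITensorPack pack{ { TensorType::ACL_SRC, &mtx_b }, { TensorType::ACL_DST, &vector_sum_col } };
 *   kernel.run_op(pack, kernel.window(), CLScheduler::get().queue());
 */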
} // namespace kernels
} // namespace opencl
} // namespace arm_compute
#endif /* ARM_COMPUTE_CL_GEMMLOWP_REDUCTION_KERNEL_H */