/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
namespace
{
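// Sanity checks shared by configure() and validate(): the accumulator input must be S32,
// the requested clamping bounds must lie within the range of the quantized output type,
// the optional bias must be a 1D S32 vector matching the input width, and an already
// initialized destination must match the requested output data type and the input shape.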
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32);

    ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
                                || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);

    // Check the bias if it exists
    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0));
    }

    if(dst->total_size() != 0)
    {
        if(dst->data_type() != output_stage->output_data_type && (output_stage->output_data_type == DataType::QASYMM8 || output_stage->output_data_type == DataType::QASYMM8_SIGNED))
        {
            ARM_COMPUTE_RETURN_ERROR_MSG("Mismatching data types");
        }

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
    }

    return Status{};
}

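// Adds the GEMMLowp output offset to each of the 16 accumulators and multiplies them by the
// integer result multiplier. This is the vectorised equivalent of the scalar expression
// (value + gemmlowp_offset) * gemmlowp_multiplier used in the leftover loops below.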
inline void scale_input(int32x4x4_t &in_s32, int32x4_t result_offset_s32, int32_t result_mult_int)
{
    // Add the offset terms to GEMM's result
    in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_s32);
    in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_s32);
    in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_s32);
    in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_s32);

    // Multiply by result_mult_int
    in_s32.val[0] = vmulq_n_s32(in_s32.val[0], result_mult_int);
    in_s32.val[1] = vmulq_n_s32(in_s32.val[1], result_mult_int);
    in_s32.val[2] = vmulq_n_s32(in_s32.val[2], result_mult_int);
    in_s32.val[3] = vmulq_n_s32(in_s32.val[3], result_mult_int);
}

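// Saturating narrowing from S16 to the 8-bit output type, selected at compile time via SFINAE:
// the uint8_t overload uses vqmovun (signed-to-unsigned saturation) while the int8_t overload
// uses vqmovn (signed saturation).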
template <typename T>
inline typename std::enable_if<std::is_same<T, uint8_t>::value,
       typename wrapper::traits::neon_vector<T, 16>::type>::type
convert_to_8bit(const int16x8x2_t in_s16)
{
    return wrapper::vcombine(wrapper::vqmovun(in_s16.val[0]), wrapper::vqmovun(in_s16.val[1]));
}

template <typename T>
inline typename std::enable_if<std::is_same<T, int8_t>::value,
       typename wrapper::traits::neon_vector<T, 16>::type>::type
convert_to_8bit(const int16x8x2_t in_s16)
{
    return wrapper::vcombine(wrapper::vqmovn(in_s16.val[0]), wrapper::vqmovn(in_s16.val[1]));
}

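// Applies the final arithmetic shift to the scaled accumulators (result_shift_s32 holds a
// negative count, so vshlq_s32 shifts right), narrows S32 -> S16 -> 8 bits with saturation,
// and clamps the result to the [min, max] vectors.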
template <typename T>
inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_shift_s32, typename wrapper::traits::neon_vector<T, 16>::type min,
                                                                                typename wrapper::traits::neon_vector<T, 16>::type max)
{
    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8 or U8
    typename wrapper::traits::neon_vector<T, 16>::type out = convert_to_8bit<T>(in_s16);

    out = wrapper::vmax(out, min);
    out = wrapper::vmin(out, max);

    return out;
}
} // namespace

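// Processes the window in blocks of 16 S32 accumulators: optionally adds the bias, applies the
// offset/multiplier/shift output stage, narrows to the 8-bit output type and clamps. A scalar
// loop handles the leftover elements at the end of each row.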
template <typename T>
void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window)
{
    using VectorType = typename wrapper::traits::neon_vector<T, 16>::type;

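    // The shift is negated so that vshlq_s32 in finalize_quantization performs an arithmetic
    // right shift, matching the scalar ">> gemmlowp_shift" used in the leftover loops.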
    const int32x4_t result_offset_s32 = vdupq_n_s32(_output_stage->gemmlowp_offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(-_output_stage->gemmlowp_shift);
    const int       window_step_x     = 16;
    const auto      window_start_x    = static_cast<int>(window.x().start());
    const auto      window_end_x      = static_cast<int>(window.x().end());

    const int clamp_min = (_is_bounded_relu) ? _output_stage->gemmlowp_min_bound : std::numeric_limits<T>::lowest();
    const int clamp_max = (_is_bounded_relu) ? _output_stage->gemmlowp_max_bound : std::numeric_limits<T>::max();

    VectorType min = wrapper::vdup_n(static_cast<T>(clamp_min), wrapper::traits::vector_128_tag{});
    VectorType max = wrapper::vdup_n(static_cast<T>(clamp_max), wrapper::traits::vector_128_tag{});

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(src, win);
    Iterator out(dst, win);

    if(bias != nullptr)
    {
        Window win_biases;
        win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
        win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));

        Iterator bias_i(bias, win_biases);
        execute_window_loop(win, [&](const Coordinates &)
        {
            // Compute 16 elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
                    }
                };

                const int32x4x4_t bias_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 12)
                    }
                };

                // Add the bias to GEMM's result
                in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
                in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
                in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
                in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);

                // Add the offset terms to GEMM's result and multiply by result_mult_int
                scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);

                wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const int bias_value = *(reinterpret_cast<const int *>(bias_i.ptr()) + x);
                int       in_value   = *(reinterpret_cast<const int *>(in.ptr()) + x);

                // Quantize
                in_value = ((in_value + bias_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;

                // Store the result
                *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
            }
        },
        in, bias_i, out);
    }
    else
    {
        execute_window_loop(win, [&](const Coordinates &)
        {
            // Compute 16 elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                int32x4x4_t in_s32 =
                {
                    {
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
                    }
                };

                // Add the offset terms to GEMM's result and multiply by result_mult_int
                scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);

                wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);

                // Quantize
                in_value = ((in_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;

                // Store the result
                *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
            }
        },
        in, out);
    }
}

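// Validates the arguments, auto-initializes the destination, stores the output stage parameters,
// configures the kernel window over the full input and selects the uint8_t or int8_t
// specialization of run_internal according to the output data type.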
void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
{
    ARM_COMPUTE_UNUSED(bias);
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, output_stage);

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*dst, src->clone()->set_data_type(output_stage->output_data_type));

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src,
                                                  bias,
                                                  dst,
                                                  output_stage));

    _output_stage = output_stage;

    // Configure kernel window
    Window win = calculate_max_window(*src, Steps());

    ICpuKernel::configure(win);

    // Check if we need to clamp the result using min and max
    _is_bounded_relu = ((_output_stage->gemmlowp_min_bound != _output_stage->gemmlowp_max_bound)
                        && !(_output_stage->gemmlowp_min_bound == std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
                             && _output_stage->gemmlowp_max_bound == std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))));
    if(_output_stage->output_data_type == DataType::QASYMM8)
    {
        _func = &CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal<uint8_t>;
    }
    else if(_output_stage->output_data_type == DataType::QASYMM8_SIGNED)
    {
        _func = &CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal<int8_t>;
    }
    else
    {
        ARM_COMPUTE_ERROR("Data type not supported");
    }
}

Status CpuGemmLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, output_stage));
    return Status{};
}

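// Fetches the source, optional bias and destination tensors from the pack and dispatches to the
// data-type-specific run_internal selected in configure().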
void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
    ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");

    auto src  = tensors.get_const_tensor(TensorType::ACL_SRC);
    auto bias = tensors.get_const_tensor(TensorType::ACL_BIAS);
    auto dst  = tensors.get_tensor(TensorType::ACL_DST);
    (this->*_func)(src, bias, dst, window);
}

const char *CpuGemmLowpQuantizeDownInt32ScaleKernel::name() const
{
    return "CpuGemmLowpQuantizeDownInt32ScaleKernel";
}
} // namespace kernels
} // namespace cpu
} // namespace arm_compute