/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef SRC_CORE_HELPERS_POOLINGHELPERS_H
#define SRC_CORE_HELPERS_POOLINGHELPERS_H

#include "src/core/NEON/NEAsymm.h"

namespace arm_compute
{
namespace cpu
{
namespace
{

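/** Compute the reciprocal of the number of elements covered by a 3D average-pooling window.
 *
 * The window is positioned from the NDHWC output coordinates @p id using the strides and paddings,
 * and its end is clamped to the given upper bounds. When @p exclude_padding is true, elements that
 * fall into the padding region are not counted towards the averaging scale.
 */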
inline float calculate_avg_scale_pool3d(bool exclude_padding, const Coordinates &id, const int pool_size_x, const int pool_size_y, const int pool_size_z, const int upper_bound_w,
                                        const int upper_bound_h, const int upper_bound_d, const int pad_x, const int pad_y, const int pad_z, const int stride_x, const int stride_y, const int stride_z)
{
    // Based on NDHWC
    int start_x = id[1] * stride_x - pad_x;
    int start_y = id[2] * stride_y - pad_y;
    int start_z = id[3] * stride_z - pad_z;

    const int end_x = std::min(start_x + pool_size_x, upper_bound_w);
    const int end_y = std::min(start_y + pool_size_y, upper_bound_h);
    const int end_z = std::min(start_z + pool_size_z, upper_bound_d);
    if(exclude_padding)
    {
        start_x = std::max(0, start_x);
        start_y = std::max(0, start_y);
        start_z = std::max(0, start_z);
    }
    return 1.f / ((end_y - start_y) * (end_x - start_x) * (end_z - start_z));
}

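/** Compute the reciprocal of the number of elements covered by a 2D average-pooling window.
 *
 * The width and height indices are resolved from @p data_layout. When @p exclude_padding is true,
 * elements that fall into the padding region are not counted towards the averaging scale.
 */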
inline float calculate_avg_scale_pool2d(bool exclude_padding, DataLayout data_layout, const Coordinates &id, const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
                                        const int pad_x, const int pad_y, const int stride_x, const int stride_y)
{
    const unsigned int idx_width  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const unsigned int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    int start_x = id[idx_width] * stride_x - pad_x;
    int start_y = id[idx_height] * stride_y - pad_y;

    const int end_x = std::min(start_x + pool_size_x, upper_bound_w);
    const int end_y = std::min(start_y + pool_size_y, upper_bound_h);
    if(exclude_padding)
    {
        start_x = std::max(0, start_x);
        start_y = std::max(0, start_y);
    }
    return 1.f / ((end_y - start_y) * (end_x - start_x));
}

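/** Quantize a float value to QASYMM8_SIGNED or QASYMM8, selected through the template type (int8_t or uint8_t). */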
template <typename T>
inline typename std::enable_if<std::is_same<T, int8_t>::value, int8_t>::type
quantize(float val, const UniformQuantizationInfo &info)
{
    return quantize_qasymm8_signed(val, info);
}

template <typename T>
inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint8_t>::type
quantize(float val, const UniformQuantizationInfo &info)
{
    return quantize_qasymm8(val, info);
}

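/** Convert a float32x4_t vector to the 32-bit integer vector type selected by T (uint32x4_t or int32x4_t). */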
template <typename T>
inline T vcvtq_q32_f32(float32x4_t values);

template <>
inline uint32x4_t vcvtq_q32_f32(float32x4_t values)
{
    return vcvtq_u32_f32(values);
}

template <>
inline int32x4_t vcvtq_q32_f32(float32x4_t values)
{
    return vcvtq_s32_f32(values);
}

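/** Convert a 32-bit integer vector (uint32x4_t or int32x4_t) to a float32x4_t vector. */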
template <typename T>
inline float32x4_t vcvtq_f32_q32(T values);

template <>
inline float32x4_t vcvtq_f32_q32(uint32x4_t values)
{
    return vcvtq_f32_u32(values);
}

template <>
inline float32x4_t vcvtq_f32_q32(int32x4_t values)
{
    return vcvtq_f32_s32(values);
}

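/** Requantize a float accumulator to 16 lanes of QASYMM8 or QASYMM8_SIGNED, folding the pooling scale into
 *  the quantization scale (new_scale = quant_rescale / scale_pooling) and applying the given output offset. */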
template <typename Tout>
inline Tout vrequantize_pooling_with_scale(const float32x4x4_t &acc, const float quant_rescale, const float scale_pooling, const int32_t new_offset);

template <>
inline uint8x16_t vrequantize_pooling_with_scale(const float32x4x4_t &acc, const float quant_rescale, const float scale_pooling, const int32_t new_offset)
{
    const float new_scale = quant_rescale / scale_pooling;
    return vquantize(acc, UniformQuantizationInfo(new_scale, new_offset));
}

template <>
inline int8x16_t vrequantize_pooling_with_scale(const float32x4x4_t &acc, const float quant_rescale, const float scale_pooling, const int32_t new_offset)
{
    const float new_scale = quant_rescale / scale_pooling;
    return vquantize_signed(acc, UniformQuantizationInfo(new_scale, new_offset));
}

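/** Requantize two 8-lane quantized vectors into one 16-lane vector: both inputs are widened to float,
 *  then quantized back with @p requant_qinfo. */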
template <typename Tin, typename Tout>
inline Tout vrequantize_pooling(Tin vec1, Tin vec2, const UniformQuantizationInfo &requant_qinfo);

template <>
inline uint8x16_t vrequantize_pooling(uint8x8_t vec1, uint8x8_t vec2, const UniformQuantizationInfo &requant_qinfo)
{
    const float32x4x4_t acc =
    {
        {
            vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8((vec1))))),
            vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8((vec1))))),
            vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8((vec2))))),
            vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8((vec2))))),
        }
    };
    return vquantize(acc, requant_qinfo);
}

template <>
inline int8x16_t vrequantize_pooling(int8x8_t vec1, int8x8_t vec2, const UniformQuantizationInfo &requant_qinfo)
{
    const float32x4x4_t acc =
    {
        {
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8((vec1))))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8((vec1))))),
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8((vec2))))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8((vec2))))),
        }
    };
    return vquantize_signed(acc, requant_qinfo);
}

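/** Requantize a single 8-lane quantized vector: the input is widened to float, then quantized back
 *  with @p requant_qinfo. */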
template <typename T>
inline T vrequantize_pooling(T &vec, const UniformQuantizationInfo &requant_qinfo);

template <>
inline uint8x8_t vrequantize_pooling(uint8x8_t &vec, const UniformQuantizationInfo &requant_qinfo)
{
    const float32x4x2_t acc =
    {
        {
            vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8((vec))))),
            vcvtq_f32_u32(vmovl_u16(vget_high_u16(vmovl_u8((vec))))),
        }
    };
    return vquantize(acc, requant_qinfo);
}

template <>
inline int8x8_t vrequantize_pooling(int8x8_t &vec, const UniformQuantizationInfo &requant_qinfo)
{
    const float32x4x2_t acc =
    {
        {
            vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8((vec))))),
            vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8((vec))))),
        }
    };
    return vquantize_signed(acc, requant_qinfo);
}

} // namespace
} // namespace cpu
} // namespace arm_compute
#endif /* SRC_CORE_HELPERS_POOLINGHELPERS_H */