/*
 * Copyright (c) 2017-2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "PoolingLayer.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/validation/Helpers.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
using namespace arm_compute::misc::shape_calculator;
template <typename T, typename ACC_T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> pooling_layer_internal(const SimpleTensor<T> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    // Create reference
    const TensorShape pooled_shape = compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info);
    SimpleTensor<T>   dst{ pooled_shape, src.data_type(), 1 };
    if(indices)
    {
        *indices = SimpleTensor<uint32_t>{ pooled_shape, DataType::U32, 1 };
    }
    const int   pool_size_x     = info.is_global_pooling ? src.shape().x() : info.pool_size.width;
    const int   pool_size_y     = info.is_global_pooling ? src.shape().y() : info.pool_size.height;
    PoolingType type            = info.pool_type;
    int         pool_stride_x   = info.pad_stride_info.stride().first;
    int         pool_stride_y   = info.pad_stride_info.stride().second;
    int         pad_left        = info.pad_stride_info.pad_left();
    int         pad_top         = info.pad_stride_info.pad_top();
    int         pad_right       = info.pad_stride_info.pad_right();
    int         pad_bottom      = info.pad_stride_info.pad_bottom();
    bool        exclude_padding = info.exclude_padding;

    const auto w_src = static_cast<int>(src.shape()[0]);
    const auto h_src = static_cast<int>(src.shape()[1]);
    const auto z_src = static_cast<int>(src.shape()[2]);
    const auto b_src = static_cast<int>(src.shape()[3]);

    const int upper_dims = src.shape().total_size() / (w_src * h_src);

    const auto w_dst = static_cast<int>(dst.shape()[0]);
    const auto h_dst = static_cast<int>(dst.shape()[1]);
    const auto z_dst = static_cast<int>(dst.shape()[2]);
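
    // NHWC-permuted copy of the input shape, used to flatten max-pooling
    // indices when data_layout is NHWC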
    TensorShape shape_nhwc(src.shape());
    permute(shape_nhwc, PermutationVector(2U, 0U, 1U));
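    // MAX pooling: the window is clamped to the valid input region, so padded
    // pixels never contribute to the result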
    if(type == PoolingType::MAX)
    {
        for(int b = 0; b < b_src; ++b)
        {
            for(int r = 0; r < z_src; ++r)
            {
                for(int h = 0; h < h_dst; ++h)
                {
                    for(int w = 0; w < w_dst; ++w)
                    {
                        int wstart = w * pool_stride_x - pad_left;
                        int hstart = h * pool_stride_y - pad_top;
                        int wend   = std::min(wstart + pool_size_x, w_src);
                        int hend   = std::min(hstart + pool_size_y, h_src);
                        wstart     = std::max(wstart, 0);
                        hstart     = std::max(hstart, 0);

                        auto max_val = -std::numeric_limits<ACC_T>::infinity();
                        int  max_index{ 0 };
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                const auto val = static_cast<ACC_T>(src[b * z_src * h_src * w_src + r * h_src * w_src + y * w_src + x]);
                                if(val > max_val)
                                {
                                    max_val = val;
                                    if(data_layout == DataLayout::NCHW)
                                    {
                                        max_index = coord2index(src.shape(), Coordinates(x, y, r, 0));
                                    }
                                    else
                                    {
                                        max_index = coord2index(shape_nhwc, Coordinates(r, x, y, 0));
                                    }
                                }
                            }
                        }

                        dst[b * z_dst * h_dst * w_dst + r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(max_val);
                        if(indices)
                        {
                            (*indices)[b * z_dst * h_dst * w_dst + r * h_dst * w_dst + h * w_dst + w] = max_index;
                        }
                    }
                }
            }
        }
    }
    else // Average or L2 pooling
    {
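        // AVG sums the window and divides by the window size; L2 takes the
        // square root of the mean of squares. Unless exclude_padding is set,
        // the divisor also counts padded pixels.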
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < h_dst; ++h)
            {
                for(int w = 0; w < w_dst; ++w)
                {
                    ACC_T avg_val(0);
                    int   wstart = w * pool_stride_x - pad_left;
                    int   hstart = h * pool_stride_y - pad_top;
                    int   wend   = std::min(wstart + pool_size_x, w_src + pad_right);
                    int   hend   = std::min(hstart + pool_size_y, h_src + pad_bottom);
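                    // Window size including padded pixels; recomputed below
                    // once the window has been clamped to the input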
                    int pool = (hend - hstart) * (wend - wstart);
                    wstart   = std::max(wstart, 0);
                    hstart   = std::max(hstart, 0);
                    wend     = std::min(wend, w_src);
                    hend     = std::min(hend, h_src);
                    // Exclude padding pixels from the average
                    if(exclude_padding)
                    {
                        pool = (hend - hstart) * (wend - wstart);
                    }

                    if(type == PoolingType::AVG)
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                avg_val += static_cast<ACC_T>(src[r * h_src * w_src + y * w_src + x]);
                            }
                        }
                        dst[r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(avg_val / pool);
                    }
                    else
                    {
                        for(int y = hstart; y < hend; ++y)
                        {
                            for(int x = wstart; x < wend; ++x)
                            {
                                const auto val = static_cast<ACC_T>(src[r * h_src * w_src + y * w_src + x]);
                                avg_val += val * val;
                            }
                        }
                        dst[r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(std::sqrt(avg_val / pool));
                    }
                }
            }
        }
    }
    return dst;
}

template SimpleTensor<float> pooling_layer_internal<float>(const SimpleTensor<float> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

template SimpleTensor<half> pooling_layer_internal<half>(const SimpleTensor<half> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

template SimpleTensor<half> pooling_layer_internal<half, float>(const SimpleTensor<half> &src, const PoolingLayerInfo &info, SimpleTensor<uint32_t> *indices, DataLayout data_layout);
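
// Generic entry point: floating-point types pool in their native precision.
// output_qinfo is only consumed by the quantized specializations below.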
181
template <typename T>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    ARM_COMPUTE_UNUSED(output_qinfo);
    return pooling_layer_internal<T, T>(src, info, indices, data_layout);
}
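
// Quantized (QASYMM8) pooling: dequantize to F32, pool in F32, then
// requantize with output_qinfo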
template <>
SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices,
                                             DataLayout data_layout)
{
    SimpleTensor<float>   src_tmp = convert_from_asymmetric(src);
    SimpleTensor<float>   dst_tmp = pooling_layer_internal<float>(src_tmp, info, indices, data_layout);
    SimpleTensor<uint8_t> dst     = convert_to_asymmetric<uint8_t>(dst_tmp, output_qinfo);
    return dst;
}
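
// Quantized (QASYMM8_SIGNED) pooling follows the same
// dequantize/pool/requantize path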
template <>
SimpleTensor<int8_t> pooling_layer<int8_t>(const SimpleTensor<int8_t> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    SimpleTensor<float>  src_tmp = convert_from_asymmetric(src);
    SimpleTensor<float>  dst_tmp = pooling_layer_internal<float>(src_tmp, info, indices, data_layout);
    SimpleTensor<int8_t> dst     = convert_to_asymmetric<int8_t>(dst_tmp, output_qinfo);
    return dst;
}
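
// FP16 pooling: accumulate in F32 when fp_mixed_precision is set, to reduce
// rounding error over large pooling windows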
template <>
SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout)
{
    ARM_COMPUTE_UNUSED(output_qinfo);
    if(src.data_type() == DataType::F16 && info.fp_mixed_precision)
    {
        return pooling_layer_internal<half, float>(src, info, indices, data_layout);
    }

    return pooling_layer_internal<half>(src, info, indices, data_layout);
}

template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> *indices, DataLayout data_layout);

} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute