/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuPool2dKernel.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NEFixedPoint.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/common/Registrars.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/pool2d/neon/list.h"
#include "support/ToolchainSupport.h"

#include "src/core/NEON/wrapper/wrapper.h"
#include <arm_neon.h>

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
namespace
{
using namespace misc::shape_calculator;

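// Selection table of pooling micro-kernels: each entry pairs a selector
// predicate (data layout, data type, pool size/stride, ISA features) with the
// NEON implementation to dispatch; entries are tried in order and the first
// matching one is used.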
static const std::vector<CpuPool2dKernel::PoolingKernel> available_kernels =
{
    {
        "neon_qu8_nhwc_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::QASYMM8)); },
        REGISTER_QASYMM8_NEON(arm_compute::cpu::poolingMxN_qasymm8_neon_nhwc)
    },
    {
        "neon_qs8_nhwc_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::QASYMM8_SIGNED)); },
        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::poolingMxN_qasymm8_signed_neon_nhwc)
    },
    {
        "neon_f16_nhwc_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::F16)) && data.isa.fp16; },
        REGISTER_FP16_NEON(arm_compute::cpu::poolingMxN_fp16_neon_nhwc)
    },
    {
        "neon_fp32_nhwc_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::F32)); },
        REGISTER_FP32_NEON(arm_compute::cpu::poolingMxN_fp32_neon_nhwc)
    },
#if defined(ENABLE_NCHW_KERNELS)
    {
        "neon_qu8_nchw_pool2",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 2) && (data.pool_stride_x < 3)); },
        REGISTER_QASYMM8_NEON(arm_compute::cpu::pooling2_quantized_neon_nchw<uint8_t>)
    },
    {
        "neon_qu8_nchw_pool3",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 3) && (data.pool_stride_x < 3)); },
        REGISTER_QASYMM8_NEON(arm_compute::cpu::pooling3_quantized_neon_nchw<uint8_t>)
    },
    {
        "neon_qu8_nchw_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8)); },
        REGISTER_QASYMM8_NEON(arm_compute::cpu::poolingMxN_quantized_neon_nchw<uint8_t>)
    },
    {
        "neon_qs8_nchw_pool2",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8_SIGNED) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 2) && (data.pool_stride_x < 3)); },
        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::pooling2_quantized_neon_nchw<int8_t>)
    },
    {
        "neon_qs8_nchw_pool3",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8_SIGNED) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 3) && (data.pool_stride_x < 3)); },
        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::pooling3_quantized_neon_nchw<int8_t>)
    },
    {
        "neon_qs8_nchw_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8_SIGNED)); },
        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::poolingMxN_quantized_neon_nchw<int8_t>)
    },
    {
        "neon_fp16_nchw_pool2",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F16 && data.isa.fp16) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 2)); },
        REGISTER_FP16_NEON(arm_compute::cpu::pooling2_fp16_neon_nchw)
    },
    {
        "neon_fp16_nchw_pool3",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F16 && data.isa.fp16) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 3)); },
        REGISTER_FP16_NEON(arm_compute::cpu::pooling3_fp16_neon_nchw)
    },
    {
        "neon_fp16_nchw_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F16 && data.isa.fp16)); },
        REGISTER_FP16_NEON(arm_compute::cpu::poolingMxN_fp16_neon_nchw)
    },
    {
        "neon_fp32_nchw_pool2",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F32) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 2)); },
        REGISTER_FP32_NEON(arm_compute::cpu::pooling2_fp32_neon_nchw)
    },
    {
        "neon_fp32_nchw_pool3",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F32) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 3)); },
        REGISTER_FP32_NEON(arm_compute::cpu::pooling3_fp32_neon_nchw)
    },
    {
        "neon_fp32_nchw_pool7",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F32) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 7)); },
        REGISTER_FP32_NEON(arm_compute::cpu::pooling7_fp32_neon_nchw)
    },
    {
        "neon_fp32_nchw_poolMxN",
        [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F32)); },
        REGISTER_FP32_NEON(arm_compute::cpu::poolingMxN_fp32_neon_nchw)
    },
#endif /* defined(ENABLE_NCHW_KERNELS) */
};

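// Validate the pooling configuration against src/dst/indices and check that a
// matching micro-kernel exists for the requested data type, layout and ISA.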
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info,
                          const ITensorInfo *indices, Size2D pool_size)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(pool_size.x() == 0);
    ARM_COMPUTE_RETURN_ERROR_ON(pool_size.y() == 0);

    int                 pool_stride_x   = 0;
    int                 pool_stride_y   = 0;
    int                 output_width    = 0;
    int                 output_height   = 0;
    PoolingType         pool_type       = pool_info.pool_type;
    const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
    const auto          data_layout     = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
    const int           idx_width       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int           idx_height      = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG((!is_data_type_float(src->data_type()))
                                    && (is_pool_region_entirely_outside_input(pool_info)),
                                    "Pooling region that is entirely outside input tensor is unsupported for non-float types");

    std::tie(output_width, output_height) = scaled_dimensions_signed(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height],
                                                                     pool_size.x(), pool_size.y(), pool_info.pad_stride_info);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1), "Calculated output dimension size is invalid");

    TensorInfo out_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, dst->data_type()));
    std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
    if(indices)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F32, DataType::F16);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method");
    }
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(pool_type == PoolingType::L2 && is_data_type_quantized(src->data_type()));
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(src->data_type()) && !pool_info.exclude_padding && (pool_info.pool_type == PoolingType::AVG) && pool_info.pad_stride_info.has_padding()
                                    && (src->data_layout() == DataLayout::NHWC),
                                    "exclude_padding equal false is not supported for AVG Pooling with padding on quantized types");

    if(dst->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info);
        if(indices)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(indices, &out_info);
        }
    }

    const auto *uk = CpuPool2dKernel::get_implementation(PoolDataTypeISASelectorData{ src->data_type(), src->data_layout(), pool_stride_x, pool_size, CPUInfo::get().get_isa() });
    ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);

    return Status{};
}

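// Auto-initialize dst (and indices) if needed, choose the per-iteration step
// for the selected configuration and build the execution window over dst.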
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, const PoolingLayerInfo &pool_info,
                                                        unsigned int &num_elems_processed_per_iteration,
                                                        int pool_size_x, int pool_size_y)
{
    // Auto-initialize dst if not yet initialized
    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_pool_shape(*src, pool_info)));
    if(indices)
    {
        // Auto-initialize indices if not yet initialized
        auto_init_if_empty(*indices, (src->clone()->set_tensor_shape(compute_pool_shape(*src,
                                                                                        pool_info)))
                           .set_data_type(DataType::U32) /* we store the offset to the element */);
    }
    const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;

    int                 pool_stride_x   = 0;
    int                 pool_stride_y   = 0;
    const int           idx_width       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int           idx_height      = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;

    std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
    const bool         is_square = pool_size_x == pool_size_y;
    const unsigned int pooled_w  = dst->dimension(idx_width);
    const unsigned int pooled_h  = dst->dimension(idx_height);

    // If the pool region is not square, the generic MxN kernel is used and one element is processed per iteration
    num_elems_processed_per_iteration = 1;

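    // Only the specialized square 2x2/3x3 quantized kernels consume more than
    // one element per iteration; the step depends on pool size and stride.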
    if(is_square)
    {
        switch(src->data_type())
        {
            case DataType::QASYMM8:
            case DataType::QASYMM8_SIGNED:
                switch(pool_size_x)
                {
                    case 2:
                        num_elems_processed_per_iteration = (pool_stride_x == 2) ? 8 : 15;
                        break;
                    case 3:
                        num_elems_processed_per_iteration = (pool_stride_x == 2) ? 7 : 14;
                        break;
                    default:
                        break;
                }
                break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            case DataType::F16:
                num_elems_processed_per_iteration = 1;
                break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
            case DataType::F32:
                num_elems_processed_per_iteration = 1;
                break;
            default:
                ARM_COMPUTE_ERROR("Element size not supported");
                break;
        }
    }

    bool   window_changed = false;
    Window win{};
    // Upper limit for the number of right/bottom border elements that are accessed
    TensorShape dst_shape{ src->tensor_shape() };
    dst_shape.set(0, pooled_w);
    dst_shape.set(1, pooled_h);
    TensorInfo dst_info(src->clone()->set_tensor_shape(dst_shape));
    win = calculate_max_window(dst_info, Steps(num_elems_processed_per_iteration));

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

void CpuPool2dKernel::configure(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
    const PadStrideInfo pad_stride_info   = pool_info.pad_stride_info;
    const bool          is_global_pooling = pool_info.is_global_pooling;

    // Get data layout
    const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
    const int  idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int  idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    // Update pool size in case of global pooling
    const Size2D pool_size(
        is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width,
        is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height);

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices, pool_size));

    const auto *uk = CpuPool2dKernel::get_implementation(PoolDataTypeISASelectorData{ src->data_type(), src->data_layout(), (int)pad_stride_info.stride().first, pool_size, CPUInfo::get().get_isa() });
    ARM_COMPUTE_ERROR_ON(uk == nullptr);

    // Set instance variables
    _pool_info     = pool_info;
    _data_layout   = src->data_layout();
    _pool_size     = pool_size;
    _pool_stride_x = pad_stride_info.stride().first;
    _run_method    = uk->ukernel;
    _name          = std::string("CpuPool2dKernel").append("/").append(uk->name);

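    // NHWC kernels iterate directly over the dst window; NCHW kernels need a
    // step and window derived from the pooling configuration.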
    if(_data_layout == DataLayout::NHWC)
    {
        // Configure kernel window
        Window win = calculate_max_window(*dst, Steps());
        ICpuKernel::configure(win);
    }
    else
    {
        // Configure kernel window
        auto win_config = validate_and_configure_window(src, dst, indices, pool_info, _num_elems_processed_per_iteration,
                                                        pool_size.x(), pool_size.y());
        ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
        ICpuKernel::configure(win_config.second);
    }
}

Status CpuPool2dKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);

    unsigned int num_elems_processed_per_iteration = 0;

    const bool is_global_pooling = pool_info.is_global_pooling;

    // Get data layout
    const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
    const int  idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int  idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    unsigned int pool_size_x = is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
    unsigned int pool_size_y = is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;

    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices, Size2D(pool_size_x, pool_size_y)));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(),
                                                              (indices) ? indices->clone().get() : nullptr, pool_info, num_elems_processed_per_iteration,
                                                              pool_size_x, pool_size_y)
                                .first);

    return Status{};
}

void CpuPool2dKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_run_method == nullptr);

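    // ACL_DST_1 carries the optional max-pooling indices; validation restricts
    // it to MAX pooling with a 2x2 pool, so it may be nullptr here.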
    const ITensor *src     = tensors.get_const_tensor(TensorType::ACL_SRC_0);
    ITensor       *dst     = tensors.get_tensor(TensorType::ACL_DST_0);
    ITensor       *indices = tensors.get_tensor(TensorType::ACL_DST_1);

    const unsigned int pool_stride_x = _pool_info.pad_stride_info.stride().first;
    const unsigned int pool_stride_y = _pool_info.pad_stride_info.stride().second;
    const unsigned int pool_size     = _pool_info.pool_size.width;

    Window window_src(window);
    if(_data_layout == DataLayout::NCHW)
    {
        // Set the steps of the src window in the x and y directions
        unsigned int window_x_inc = 0;
        switch(src->info()->data_type())
        {
            case DataType::QASYMM8:
            case DataType::QASYMM8_SIGNED:
            {
                window_x_inc = pool_stride_x;
                if((pool_size == 2 || pool_size == 3) && pool_stride_x < 3)
                {
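                    // The specialized quantized kernels produce
                    // _num_elems_processed_per_iteration outputs per step, so
                    // the src window advances accordingly (twice as far when
                    // the stride is 2).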
                    window_x_inc = (pool_stride_x == 2) ? _num_elems_processed_per_iteration * 2 : _num_elems_processed_per_iteration;
                }
                break;
            }

            case DataType::F16:
            case DataType::F32:
            {
                window_x_inc = pool_stride_x;
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
            }
        }
        window_src.set(Window::DimX, Window::Dimension(window.x().start() * pool_stride_x, window.x().end() * pool_stride_x, window_x_inc));
        window_src.set(Window::DimY, Window::Dimension(window.y().start() * pool_stride_y, window.y().end() * pool_stride_y, pool_stride_y));
    }
    else
    {
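        // In NHWC the kernel traverses the pooling region and the channel
        // dimension internally: collapse DimX (channels) and step the spatial
        // dimensions of the src window by the pool strides.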
        window_src.set(Window::DimX, Window::Dimension(0, 1, 1));
        window_src.set(Window::DimY, Window::Dimension(0, src->info()->dimension(1), pool_stride_x));
        window_src.set(Window::DimZ, Window::Dimension(0, src->info()->dimension(2), pool_stride_y));
    }
    _run_method(src, dst, indices, _pool_info, window_src, window);
}

const char *CpuPool2dKernel::name() const
{
    return _name.c_str();
}

const std::vector<CpuPool2dKernel::PoolingKernel> &CpuPool2dKernel::get_available_kernels()
{
    return available_kernels;
}

} // namespace kernels
} // namespace cpu
} // namespace arm_compute