/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "helpers.h"

#if defined(POOL_AVG) || defined(POOL_L2)
#define POOL_OP(x, y) ((x) + (y))
#else /* defined(POOL_AVG) || defined(POOL_L2) */
#if defined(QUANTIZED)
#define POOL_OP(x, y) (max((x), (y)))
#else // defined(QUANTIZED)
#define POOL_OP(x, y) (fmax((x), (y)))
#endif // defined(QUANTIZED)
#endif /* defined(POOL_AVG) || defined(POOL_L2) */

#if defined(POOL_L2)
#define POW2_OP(x, vec_size) ((x) * (x))
#else /* defined(POOL_L2) */
#define POW2_OP(x, vec_size) (x)
#endif /* defined(POOL_L2) */

#define DIV_OP(x, y) (x * (1.f / y))
#define SQRT_OP(x) sqrt((x))

#if defined(FP_MIXED_PRECISION) || defined(QUANTIZED)
#define CONVERT_TO_ACC_DATA_TYPE(x, n) CONVERT(x, VEC_DATA_TYPE(ACC_DATA_TYPE, n))
#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) CONVERT_TO_ACC_DATA_TYPE(vload##n(offset, ptr), n)
#else /* defined(FP_MIXED_PRECISION) || defined(QUANTIZED) */
#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) vload##n(offset, ptr)
#endif /* defined(FP_MIXED_PRECISION) || defined(QUANTIZED) */

ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
                                  const int pad_x, const int pad_y, const int stride_x, const int stride_y)
{
    int       start_x = get_global_id(0) * stride_x - pad_x;
    int       start_y = get_global_id(1) * stride_y - pad_y;
    const int end_x   = min(start_x + pool_size_x, upper_bound_w);
    const int end_y   = min(start_y + pool_size_y, upper_bound_h);
#if defined(EXCLUDE_PADDING)
    start_x = max(0, start_x);
    start_y = max(0, start_y);
#endif /* defined(EXCLUDE_PADDING) */
    return ((end_y - start_y) * (end_x - start_x));
}
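
// Worked example (illustrative values only): with a 3x3 pool, stride 1 and pad 1, the top-left
// output element covers the window [-1, 2) x [-1, 2). Without -DEXCLUDE_PADDING the returned
// divisor is 3 * 3 = 9 (padding counted); with -DEXCLUDE_PADDING the negative start coordinates
// are clamped to 0 and the divisor becomes 2 * 2 = 4 (valid elements only), assuming the upper
// bounds do not clip the window.
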
#if defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)

/** Performs a pooling function of pool size equal to N (NCHW)
 *
 * @note Data type must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32/QASYMM8;
 * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
 * @note The source tensor width and height must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT
 * @note In case of average pooling the following information must be passed at compile time:
 *       -DPOOL_AVG must be provided otherwise max pooling will be performed.
 *       -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indices in x and y dimensions (width + pad)
 *       -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
 *       -DPAD_X and -DPAD_Y which are the pooling paddings in the x and y dimensions
 * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
 *
 * @param[in]  src_ptr                           Pointer to the source tensor. Supported data types: F16/F32/QASYMM8
 * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                        dst_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 */
__kernel void pooling_layer_MxN_nchw(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst))
{
    int id0 = get_global_id(0);
    int id1 = get_global_id(1);
    int id2 = get_global_id(2);

    int x_coords = (id0 * STRIDE_X) - PAD_X;
    int y_coords = (id1 * STRIDE_Y) - PAD_Y;

    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + y_coords * (int)src_stride_y + id2 * src_stride_z;

    VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
    vdata               = INITIAL_VALUE;
    ACC_DATA_TYPE sdata = INITIAL_VALUE;

    const int end_x = min((int)POOL_SIZE_X, (int)(SRC_WIDTH - x_coords));
    const int end_y = min((int)POOL_SIZE_Y, (int)(SRC_HEIGHT - y_coords));

    // Load data
    for(int y = 0; y < end_y; ++y)
    {
        if((y_coords + y) >= 0)
        {
            int x = 0;
            for(; x <= (end_x - 8); x += 8)
            {
                int8 src_x = (int8)(x_coords + x) + VEC_OFFS(int, 8);
#if defined(POOL_AVG) || defined(POOL_L2)
                SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
                cond_x = CONVERT(src_x < 0, SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, 8));
                src_x  = clamp(src_x, (int8)0, (int8)(SRC_WIDTH - 1));
                VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
                data0 = select(VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)(src_addr + src_x.s0 * sizeof(DATA_TYPE) + y * src_stride_y)), (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))0, REVERSE(cond_x, 8));
#else // defined(POOL_AVG) || defined(POOL_L2)
                src_x = clamp(src_x, 0, SRC_WIDTH - 1);
                VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
                data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)(src_addr + src_x.s0 * sizeof(DATA_TYPE) + y * src_stride_y));
#endif // defined(POOL_AVG) || defined(POOL_L2)

#if defined(POOL_L2)
                // Raise to power of 2 for L2 Pooling
                data0 *= data0;
#endif /* defined(POOL_L2) */

                vdata = POOL_OP(vdata, data0);
            }

            // Leftover
            for(; x < end_x; ++x)
            {
                int src_x = x_coords + x;
#if defined(POOL_AVG) || defined(POOL_L2)
                SELECT_DATA_TYPE(ACC_DATA_TYPE)
                cond_x              = (src_x < 0);
                src_x               = clamp(src_x, 0, SRC_WIDTH - 1);
                ACC_DATA_TYPE data0 = select((ACC_DATA_TYPE)(*((__global DATA_TYPE *)(src_addr + src_x * sizeof(DATA_TYPE) + y * src_stride_y))), (ACC_DATA_TYPE)0, cond_x);
#else // defined(POOL_AVG) || defined(POOL_L2)
                src_x               = clamp(src_x, 0, SRC_WIDTH - 1);
                ACC_DATA_TYPE data0 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)(src_addr + src_x * sizeof(DATA_TYPE) + y * src_stride_y)));
#endif // defined(POOL_AVG) || defined(POOL_L2)

#if defined(POOL_L2)
                // Raise to power of 2 for L2 Pooling
                data0 *= data0;
#endif /* defined(POOL_L2) */

                sdata = POOL_OP(sdata, data0);
            }
        }
    }

    // Reduce result
    VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
    reduce4 = POOL_OP(vdata.s0123, vdata.s4567);
    VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
    reduce2           = POOL_OP(reduce4.s01, reduce4.s23);
    ACC_DATA_TYPE res = POOL_OP(reduce2.s0, reduce2.s1);
    res               = POOL_OP(res, sdata);

#if defined(POOL_AVG) || defined(POOL_L2)
    // Divide by pool region in case of average pooling
    res = DIV_OP(res, calculate_avg_scale(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
#endif /* defined(POOL_AVG) || defined(POOL_L2) */

#if defined(QUANTIZED)

    DATA_TYPE result_q8 = CONVERT(res, DATA_TYPE);

#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)

    const float result_f32   = convert_float(result_q8);
    const float input_offset = (float)OFFSET_IN1;
    const float input_scale  = (float)SCALE_IN1;
    const float scale_out    = (float)SCALE_OUT;
    const float offset_out   = (float)OFFSET_OUT;
    const float in_f32       = (result_f32 - input_offset) * input_scale;
    const float out_f32      = in_f32 / scale_out + offset_out;
    result_q8                = CONVERT_SAT(convert_int_rte(out_f32), DATA_TYPE);

#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */

    *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = result_q8;

#else // defined(QUANTIZED)

#if defined(POOL_L2)
    // Take square root of the result in L2 pooling
    res = SQRT_OP(res);
#endif /* defined(POOL_L2) */

    // Store result
    *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = (DATA_TYPE)res;
#endif // defined(QUANTIZED)
}
#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
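
/* Illustrative build options (assumption, for demonstration only; the real option string is
 * assembled by the host-side kernel configuration). An F32 3x3 average pooling with stride 1
 * and pad 1 on a 16x16 input plane could, for example, be compiled with:
 *
 *   -DDATA_TYPE=float -DACC_DATA_TYPE=float -DPOOL_AVG
 *   -DPOOL_SIZE_X=3 -DPOOL_SIZE_Y=3 -DSTRIDE_X=1 -DSTRIDE_Y=1 -DPAD_X=1 -DPAD_Y=1
 *   -DSRC_WIDTH=16 -DSRC_HEIGHT=16 -DMAX_WIDTH=17 -DMAX_HEIGHT=17 -DINITIAL_VALUE=0
 *
 * where MAX_WIDTH/MAX_HEIGHT follow the "width + pad" convention described above.
 */
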
/** Performs a MAX pooling of pool size equal to 2, and records max value indices for NCHW.
 *
 * @note Data type must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F16/F32
 * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
 * @note The source tensor width and height must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT
 * @note Pool strides and paddings must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y, -DPAD_X and -DPAD_Y, which are the steps and paddings of the window along the x and y directions
 *
 * @param[in]  src_ptr                               Pointer to the source tensor. Supported data types: F16/F32
 * @param[in]  src_stride_x                          Stride of the source tensor in X dimension (in bytes)
 * @param[in]  src_step_x                            src_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  src_stride_y                          Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_step_y                            src_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                            src_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  src_offset_first_element_in_bytes     The offset of the first element in the source tensor
 * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
 * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
 * @param[in]  indices_ptr                           Pointer to the indices tensor. Supported data types: U32
 * @param[in]  indices_stride_x                      Stride of the indices tensor in X dimension (in bytes)
 * @param[in]  indices_step_x                        indices_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  indices_stride_y                      Stride of the indices tensor in Y dimension (in bytes)
 * @param[in]  indices_step_y                        indices_stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in]  indices_stride_z                      Stride of the indices tensor in Z dimension (in bytes)
 * @param[in]  indices_step_z                        indices_stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in]  indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
 */
__kernel void pooling_layer_2_nchw_indices(
    TENSOR3D_DECLARATION(src),
    TENSOR3D_DECLARATION(dst),
    TENSOR3D_DECLARATION(indices))
{
    int id0 = get_global_id(0);
    int id1 = get_global_id(1);
    int id2 = get_global_id(2);

    int2 x_coords = clamp((int2)((id0 * STRIDE_X) - PAD_X), (int2)0, (int2)(SRC_WIDTH - 1));
    int2 y_coords = clamp((int2)((id1 * STRIDE_Y) - PAD_Y) + VEC_OFFS(int, 2), (int2)0, (int2)(SRC_HEIGHT - 1));

    __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + id2 * src_stride_z;

    // Load data
    VEC_DATA_TYPE(DATA_TYPE, 2)
    data0 = VLOAD(2)(0, (__global DATA_TYPE *)(src_addr + x_coords.s0 * sizeof(DATA_TYPE) + y_coords.s0 * (int)src_stride_y));
    VEC_DATA_TYPE(DATA_TYPE, 2)
    data1 = VLOAD(2)(0, (__global DATA_TYPE *)(src_addr + x_coords.s1 * sizeof(DATA_TYPE) + y_coords.s1 * (int)src_stride_y));

    // Perform calculations
    DATA_TYPE data0_max = POOL_OP(data0.s0, data0.s1);
    DATA_TYPE data1_max = POOL_OP(data1.s0, data1.s1);
    DATA_TYPE res       = POOL_OP(data0_max, data1_max);
    // Store result
    *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = res;
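
    // The SRC_BATCH path below additionally records where the maximum came from, as a flat index
    // x + y * SRC_WIDTH + z * SRC_WIDTH * SRC_HEIGHT wrapped modulo SRC_BATCH, i.e. relative to
    // the start of the current batch.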
#if defined(SRC_BATCH)

    uint offset_top    = (x_coords.s0 + y_coords.s0 * SRC_WIDTH + id2 * (SRC_WIDTH * SRC_HEIGHT)) % SRC_BATCH;
    uint offset_bottom = offset_top + SRC_WIDTH;

    uint index0 = select(offset_top + 1, offset_top, isgreaterequal(data0.s0, data0.s1));
    uint index1 = select(offset_bottom + 1, offset_bottom, isgreaterequal(data1.s0, data1.s1));
    uint index  = select(index1, index0, isgreaterequal(data0_max, data1_max));

    *(__global uint *)(indices_ptr + indices_offset_first_element_in_bytes + id0 * sizeof(uint) + id1 * indices_stride_y + id2 * indices_stride_z) = index;

#endif // defined(SRC_BATCH)
}