/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "activation_float_helpers.h"
#include "helpers.h"
#include "tile_helpers.h"

#if defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
//! @cond Doxygen_Suppress
/** OpenCL kernel to compute the indirect buffer used by the 2D indirect convolution.
 *
 * @note This kernel only works with a batch size of 1
 *
 * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
 * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
 * @note The kernel width must be passed at compile time using -DWEI_CONV_WIDTH (e.g. -DWEI_CONV_WIDTH=9)
 * @note The spatial dimensions of the source tensor used by conv2d must be passed at compile time using -DSRC_CONV_WIDTH and -DSRC_CONV_HEIGHT (e.g. -DSRC_CONV_WIDTH=96, -DSRC_CONV_HEIGHT=64)
 * @note The width dimension of the destination tensor produced by conv2d must be passed at compile time using -DDST_CONV_WIDTH (e.g. -DDST_CONV_WIDTH=96)
 * @note The tensor type ("BUFFER" only) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
 * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
 * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
 *  - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
 *
 * @param[out] dst_img                           (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
 * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data type: INT32
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_c                             The size of the channels dimension of the destination tensor
 * @param[in]  dst_w                             The size of the width dimension of the destination tensor
 * @param[in]  dst_h                             The size of the height dimension of the destination tensor
 * @param[in]  dst_n                             The size of the batches dimension of the destination tensor
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 */
//! @endcond
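// Illustrative build-option sketch (values chosen purely for illustration; the real
// options are selected by the host-side kernel configuration). A 3x3 convolution with
// stride 1 and padding 1 over a 64x64 input producing a 64x64 output might be compiled
// with something along the lines of:
//   -DINDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION
//   -DPAD_LEFT=1 -DPAD_TOP=1 -DSTRIDE_X=1 -DSTRIDE_Y=1
//   -DWEI_CONV_WIDTH=3
//   -DSRC_CONV_WIDTH=64 -DSRC_CONV_HEIGHT=64 -DDST_CONV_WIDTH=64
//   -DDST_TENSOR_TYPE=BUFFER -DDST_DATA_TYPE=int -DM0=4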
__kernel void indirect_convolution_address_precalculation(
    TENSOR4D_WO_T(dst, DST_TENSOR_TYPE))
{
    const int x = get_global_id(0);
    const int y = get_global_id(1);
    const int z = get_global_id(2);

    // Note: WIDTH = M0 x KernelWidth x KernelHeight

    // m index
    const int mi = x % M0;
    // Kernel index
    const int ki = x / M0;
    // Kernel width coordinate
    const int xk = ki % WEI_CONV_WIDTH;
    // Kernel height coordinate
    const int yk = ki / WEI_CONV_WIDTH;
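    // Illustrative example (numbers for illustration only): with M0 = 4 and
    // WEI_CONV_WIDTH = 3, the global id x = 14 decomposes into mi = 2, ki = 3,
    // xk = 0 and yk = 1, i.e. the third output point of the current M0 block and
    // the kernel tap at column 0, row 1.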

    TILE(DST_DATA_TYPE, 1, 1, xi);
    TILE(DST_DATA_TYPE, 1, 1, yi);
    TILE(DST_DATA_TYPE, 1, 1, my);

    const int mout = y * M0;

    xi[0].s[0] = ((mout + mi) % DST_CONV_WIDTH) * STRIDE_X;
    yi[0].s[0] = ((mout + mi) / DST_CONV_WIDTH) * STRIDE_Y;
    xi[0].s[0] -= PAD_LEFT;
    yi[0].s[0] -= PAD_TOP;

    const int x_s = xi[0].s[0] + xk;
    const int y_s = yi[0].s[0] + yk;
    my[0].s[0]    = x_s + y_s * SRC_CONV_WIDTH;
    my[0].s[0]    = my[0].s[0] + z * (int)(SRC_CONV_WIDTH * SRC_CONV_HEIGHT);
    my[0].s[0]    = select(-1, my[0].s[0], x_s >= 0);
    my[0].s[0]    = select(-1, my[0].s[0], x_s < SRC_CONV_WIDTH);
    my[0].s[0]    = select(-1, my[0].s[0], y_s >= 0);
    my[0].s[0]    = select(-1, my[0].s[0], y_s < SRC_CONV_HEIGHT);
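    // At this point my[0].s[0] holds the linearized (x, y) position of the source
    // element read by this kernel tap (plus the batch offset), or -1 when the tap
    // falls into the padding region and must not be read; note that the consumer
    // kernel below zero-initializes its input tile before the indirect load.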

    VSTORE(1)
    (my[0].s[0], 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DST_DATA_TYPE) + y * dst_stride_y + z * dst_stride_z));
}
#endif // defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)

#if defined(INDIRECT_CONVOLUTION_NHWC)
//! @cond Doxygen_Suppress
/** OpenCL kernel to compute the indirect convolution.
 *
 * @note Data layout supported: NHWC
 * @note Data type supported: F32/F16
 * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
 * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
 * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
 * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
 * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
 * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
 * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
 * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
 * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
 * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
 * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
 * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
 * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
 * @note The vector length used for loading the values from the indirect buffer should be passed at compile time using -DIND_BUFF_VEC_SIZE (e.g. -DIND_BUFF_VEC_SIZE=4)
 * @note The activation function to fuse and corresponding A and B values should be passed at compile time using -DACTIVATION_TYPE, -DA_VAL, and -DB_VAL
 *        (e.g. -DACTIVATION_TYPE=lu_brelu_op, -DA_VAL=3.0, and -DB_VAL=1.0)
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1, 2, 3, 4, 5, 6, and 8
 *  - N0 = 2, 3, 4, 8, 16
 *  - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
 *
 * @param[in]  src_img                           (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
 * @param[in]  src_ptr                           Pointer to the source tensor. Supported data type: F16/F32
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_stride_w                      Stride of the source tensor in W dimension (in bytes)
 * @param[in]  src_c                             The size of the channels dimension of the source tensor
 * @param[in]  src_w                             The size of the width dimension of the source tensor
 * @param[in]  src_h                             The size of the height dimension of the source tensor
 * @param[in]  src_n                             The size of the batches dimension of the source tensor
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in]  off_img                           (Not supported) Read only cl_image object for the indirect buffer tensor. Included when OFF_TENSOR_TYPE=IMAGE
 * @param[in]  off_ptr                           Pointer to the indirect buffer tensor. Supported data type: INT32
 * @param[in]  off_stride_y                      Stride of the indirect buffer tensor in Y dimension (in bytes)
 * @param[in]  off_stride_z                      Stride of the indirect buffer tensor in Z dimension (in bytes)
 * @param[in]  off_stride_w                      Stride of the indirect buffer tensor in W dimension (in bytes)
 * @param[in]  off_c                             The size of the channels dimension of the indirect buffer tensor
 * @param[in]  off_w                             The size of the width dimension of the indirect buffer tensor
 * @param[in]  off_h                             The size of the height dimension of the indirect buffer tensor
 * @param[in]  off_n                             The size of the batches dimension of the indirect buffer tensor
 * @param[in]  off_offset_first_element_in_bytes The offset of the first element in the indirect buffer tensor
 * @param[out] dst_img                           (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
 * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data type: same as @p src_ptr
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_c                             The size of the channels dimension of the destination tensor
 * @param[in]  dst_w                             The size of the width dimension of the destination tensor
 * @param[in]  dst_h                             The size of the height dimension of the destination tensor
 * @param[in]  dst_n                             The size of the batches dimension of the destination tensor
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in]  wei_img                           (Optional) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
 * @param[in]  wei_ptr                           Pointer to the weights tensor. Supported data type: same as @p src_ptr
 * @param[in]  wei_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in]  wei_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in]  wei_stride_w                      Stride of the weights tensor in W dimension (in bytes)
 * @param[in]  wei_c                             The size of the channels dimension of the weights tensor
 * @param[in]  wei_w                             The size of the width dimension of the weights tensor
 * @param[in]  wei_h                             The size of the height dimension of the weights tensor
 * @param[in]  wei_n                             The size of the batches dimension of the weights tensor
 * @param[in]  wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in]  bia_ptr                           (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr
 * @param[in]  bia_stride_x                      (Optional) Stride of the bias tensor in X dimension (in bytes)
 * @param[in]  bia_step_x                        (Optional) bia_stride_x * number of elements along X processed per work item (in bytes)
 * @param[in]  bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
 */
//! @endcond
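// Illustrative build-option sketch (values chosen purely for illustration; the real
// options are selected by the host-side kernel configuration). A 3x3 F16 convolution
// over a 64-channel NHWC input producing a 96x64 output might be compiled with
// something along the lines of:
//   -DINDIRECT_CONVOLUTION_NHWC
//   -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DDST_WIDTH=96 -DDST_HEIGHT=64 -DSRC_CHANNELS=64
//   -DSRC_TENSOR_TYPE=BUFFER -DOFF_TENSOR_TYPE=BUFFER -DWEI_TENSOR_TYPE=BUFFER -DDST_TENSOR_TYPE=BUFFER
//   -DSRC_DATA_TYPE=half -DWEI_DATA_TYPE=half -DDST_DATA_TYPE=half
//   -DM0=4 -DN0=4 -DK0=4 -DPARTIAL_N0=0 -DIND_BUFF_VEC_SIZE=4
//   -DACTIVATION_TYPE=lu_brelu_op -DA_VAL=3.0 -DB_VAL=1.0
// plus, when applicable, -DHAS_BIAS (with -DBIA_DATA_TYPE=half) and -DLEFTOVER_LOOP
// when K0 does not divide the number of input channels.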
__kernel void indirect_convolution_nhwc(
    TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
    TENSOR4D_RO_T(off, OFF_TENSOR_TYPE),
    TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
    TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bia)
#endif // defined(HAS_BIAS)
)
{
    // All the tensor dimensions are passed at compile time.
    // In case of dynamic tensor support, the following dimensions should be passed as function arguments.
#define _IWEI_WIDTH WEI_WIDTH
#define _IWEI_HEIGHT WEI_HEIGHT
#define _ISRC_CHANNELS SRC_CHANNELS
#define _IDST_WIDTH DST_WIDTH
#define _IDST_HEIGHT DST_HEIGHT
#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)
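    // _IY_MULTIPLIER is the number of kernel taps (filter positions); the weights
    // tensor is addressed below with _IY_MULTIPLIER consecutive rows per output channel.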

    const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
    const int mout = GET_SPATIAL_IDX(1, M0, 0);          // WIDTH x HEIGHT
    const int bout = GET_SPATIAL_IDX(2, 1, 0);           // BATCH SIZE IDX

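    // Each work-item along dimension 1 reads its own row of the indirect buffer:
    // row get_global_id(1) holds, for every kernel tap i, the M0 precomputed source
    // offsets of the output points processed by this work-item, while bout selects
    // the batch slice.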
    off_offset_first_element_in_bytes += get_global_id(1) * off_stride_y;
    off_offset_first_element_in_bytes += bout * off_stride_z;

    // Initialize the accumulators
    TILE(DST_DATA_TYPE, M0, N0, c);

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c[i].v = 0;
    })

    for(int i = 0; i < (_IWEI_WIDTH * _IWEI_HEIGHT); ++i)
    {
        TILE(int, 1, IND_BUFF_VEC_SIZE, my);
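        // Load the M0 precomputed source offsets for kernel tap i from the indirect
        // buffer (IND_BUFF_VEC_SIZE is the vector length used for the load)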
        T_LOAD(int, 1, IND_BUFF_VEC_SIZE, OFF_TENSOR_TYPE, off, i * M0, 0, 1, 0, my);

        int ck = 0;
        for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
        {
            TILE(SRC_DATA_TYPE, M0, K0, a);
            TILE(WEI_DATA_TYPE, N0, K0, b);

            // Initialize tiles
            LOOP_UNROLLING(int, i, 0, 1, M0,
            {
                a[i].v = 0.0;
            })

            LOOP_UNROLLING(int, i, 0, 1, N0,
            {
                b[i].v = 0.0;
            })

            // Load tile from the src tensor
            T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);

            // Load tile from the weights tensor
            T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);

            // Compute the matrix multiplication between two tiles
            T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
        }

        // This #if directive should be removed in case of dynamic tensor support
#if defined(LEFTOVER_LOOP)
        // Left-over accumulations
        for(; ck < _ISRC_CHANNELS; ++ck)
        {
            TILE(SRC_DATA_TYPE, M0, 1, a);
            TILE(WEI_DATA_TYPE, N0, 1, b);

            // Initialize tiles
            LOOP_UNROLLING(int, i, 0, 1, M0,
            {
                a[i].v = 0.0;
            })

            LOOP_UNROLLING(int, i, 0, 1, N0,
            {
                b[i].v = 0.0;
            })

            // Load tile from the src tensor
            T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);

            // Load tile from the weights tensor
            // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
            T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);

            // Compute the matrix multiplication between two tiles
            T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
        }
#endif // defined(LEFTOVER_LOOP)
    }

#if defined(HAS_BIAS)
    TILE(BIA_DATA_TYPE, 1, N0, bias0);

    T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);

    // c = c + bias[broadcasted]
    T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, c, bias0, c);

#endif // HAS_BIAS

    // Apply activation
    T_ACTIVATION(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);

    TILE(uint, M0, 1, dst_indirect_y);

    // Calculate the destination indirect Y
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
        dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
    })

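    // x_cond is true only for the first block along the output-channel dimension,
    // which carries the left-over columns when the channel count is not a multiple
    // of N0; in that case the store below writes only PARTIAL_N0 valid columns.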
    const bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;

    // Store the tile in reverse order so the invalid values are overwritten with the valid ones
    T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, c, dst_indirect_y);

#undef _IWEI_WIDTH
#undef _IWEI_HEIGHT
#undef _ISRC_CHANNELS
#undef _IDST_WIDTH
#undef _IDST_HEIGHT
#undef _IY_MULTIPLIER
}
#endif // defined(INDIRECT_CONVOLUTION_NHWC)
