/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "helpers.h"
#include "tile_helpers.h"

//! @cond Doxygen_Suppress
/** OpenCL kernel to compute the transposed convolution.
 *
 * @note Data layout supported: NHWC
 * @note Data type supported: F32/F16/QASYMM8/QASYMM8_SIGNED
 * @note The transposed convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
 * @note The transposed convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
 * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
 * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
 * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
 * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
 * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
 * @note The tensor type (currently only "BUFFER" is supported) of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
 * @note The tensor type (currently only "BUFFER" is supported) of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
 * @note The tensor type (currently only "BUFFER" is supported) of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
 * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
 * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
 * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
 * @note The data type of the bias tensor must be passed at compile time using -DBIA_DATA_TYPE (e.g. -DBIA_DATA_TYPE=float)
 * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
 * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
 * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
 * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
 * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
 * @note If bias exists, the compile time argument -DHAS_BIAS should be passed
 * @note Only the following configurations of M0, N0 and K0 are currently supported:
 *  - M0 = 1
 *  - N0 = 1, 2, 3, 4, 8, 16
 *  - K0 = 1, 2, 3, 4, 8, 16
 *
 * @note In case of QASYMM8/QASYMM8_SIGNED, the following extra information must be passed at compile time:
 * - -DIS_QUANTIZED
 * - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
 * - The destination quantization shift e.g. -DDST_SHIFT=4
 * - The destination offset e.g. -DDST_OFFSET=4
 * - The source offset e.g. -DSRC_OFFSET=4
 * - The weights offset e.g. -DWEI_OFFSET=4
 * - The quantized zero value e.g. -DZERO_VALUE=4
 *
 * @param[in]  src_img                           (Not supported) Read only cl_image object for the source tensor. Included when SRC_TENSOR_TYPE=IMAGE
 * @param[in]  src_ptr                           Pointer to the source tensor. Supported data type: F32/F16/QASYMM8/QASYMM8_SIGNED
 * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_stride_w                      Stride of the source tensor in W dimension (in bytes)
 * @param[in]  src_c                             The size of the channels (IFM) dimension of the source tensor
 * @param[in]  src_w                             The size of the width dimension of the source tensor
 * @param[in]  src_h                             The size of the height dimension of the source tensor
 * @param[in]  src_n                             The size of the batches dimension of the source tensor
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[out] dst_img                           (Not supported) Write only cl_image object for the destination tensor. Included when DST_TENSOR_TYPE=IMAGE
 * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data type: same as @p src_ptr
 * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
 * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
 * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
 * @param[in]  dst_c                             The size of the channels (OFM) dimension of the destination tensor
 * @param[in]  dst_w                             The size of the width dimension of the destination tensor
 * @param[in]  dst_h                             The size of the height dimension of the destination tensor
 * @param[in]  dst_n                             The size of the batches dimension of the destination tensor
 * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in]  wei_img                           (Not supported) Read only cl_image object for the weights tensor. Included when WEI_TENSOR_TYPE=IMAGE
 * @param[in]  wei_ptr                           Pointer to the weights tensor. Supported data type: same as @p src_ptr
 * @param[in]  wei_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
 * @param[in]  wei_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
 * @param[in]  wei_stride_w                      Stride of the weights tensor in W dimension (in bytes)
 * @param[in]  wei_c                             The size of the channels (IFM) dimension of the weights tensor
 * @param[in]  wei_w                             The size of the width dimension of the weights tensor
 * @param[in]  wei_h                             The size of the height dimension of the weights tensor
 * @param[in]  wei_n                             The size of the batches (OFM) dimension of the weights tensor
 * @param[in]  wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
 * @param[in]  bia_ptr                           (Optional) Pointer to the bias tensor. Supported data type: same as @p src_ptr (if F32/F16)
 * @param[in]  bia_stride_x                      (Optional) Stride of the bias tensor in X dimension (in bytes)
 * @param[in]  bia_step_x                        (Optional) bia_stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in]  bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
 */
//! @endcond
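
// Illustrative example only (not taken from the library's host-side code): a plausible set of
// build options for an F32 build with a 2x2 kernel, 2x2 stride and no padding might look like:
//
//   -DSRC_TENSOR_TYPE=BUFFER -DWEI_TENSOR_TYPE=BUFFER -DDST_TENSOR_TYPE=BUFFER
//   -DSRC_DATA_TYPE=float -DWEI_DATA_TYPE=float -DDST_DATA_TYPE=float -DBIA_DATA_TYPE=float -DACC_DATA_TYPE=float
//   -DWEI_WIDTH=2 -DWEI_HEIGHT=2 -DSTRIDE_X=2 -DSTRIDE_Y=2 -DPAD_LEFT=0 -DPAD_TOP=0
//   -DSRC_WIDTH=32 -DSRC_HEIGHT=32 -DSRC_CHANNELS=16 -DDST_WIDTH=64 -DDST_HEIGHT=64 -DDST_CHANNELS=8
//   -DM0=1 -DN0=4 -DK0=4 -DPARTIAL_N0=0 -DZERO_VALUE=0 -DHAS_BIAS
//
// For QASYMM8/QASYMM8_SIGNED builds the quantization options listed above (-DIS_QUANTIZED,
// -DSRC_OFFSET, -DWEI_OFFSET, -DDST_OFFSET, -DDST_MULTIPLIER, -DDST_SHIFT, -DZERO_VALUE) are added
// as well. The actual values are chosen at runtime by the host-side kernel configuration.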
__kernel void transposed_convolution_nhwc(
    TENSOR4D_RO_T(src, SRC_TENSOR_TYPE),
    TENSOR4D_WO_T(dst, DST_TENSOR_TYPE),
    TENSOR4D_RO_T(wei, WEI_TENSOR_TYPE)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bia)
#endif // defined(HAS_BIAS)
)
{
    // All the tensor dimensions are passed at compile time.
    // In case of dynamic tensor support, the following dimensions should be passed as kernel arguments.
#define _IWEI_WIDTH WEI_WIDTH
#define _IWEI_HEIGHT WEI_HEIGHT
#define _ISRC_WIDTH SRC_WIDTH
#define _ISRC_HEIGHT SRC_HEIGHT
#define _ISRC_CHANNELS SRC_CHANNELS
#define _IDST_WIDTH DST_WIDTH
#define _IDST_HEIGHT DST_HEIGHT
#define _IDST_CHANNELS DST_CHANNELS
#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)

#if defined(IS_QUANTIZED)
#define _IOUTPUT_TILE cq
#else // defined(IS_QUANTIZED)
#define _IOUTPUT_TILE c
#endif // defined(IS_QUANTIZED)

    const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
    const int mout = GET_SPATIAL_IDX(1, M0, 0);          // WIDTH x HEIGHT
    const int bout = GET_SPATIAL_IDX(2, 1, 0);           // BATCH SIZE IDX

    // .v    = access the whole vector (OpenCL vector)
    // .s[x] = access the vector element at position x (scalar access)
    TILE(int, 1, M0, xi);
    TILE(int, 1, M0, yi);
    TILE(int, 1, M0, xu);
    TILE(int, 1, M0, yu);

    // Convert the linear index to coordinate
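    // For each of the M0 output positions processed by this work-item:
    //  - (xu, yu) are the output coordinates shifted back by the transposed convolution padding,
    //    i.e. the position expressed in the implicitly upsampled input space
    //  - (xi, yi) are the first source coordinates whose upsampled position (xi * STRIDE_X, yi * STRIDE_Y)
    //    is not below (xu, yu), obtained with a ceiling division by the strides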
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        xu[0].s[i] = ((mout + i) % _IDST_WIDTH) - PAD_LEFT;
        yu[0].s[i] = ((mout + i) / _IDST_WIDTH) - PAD_TOP;
        xi[0].s[i] = ceil(xu[0].s[i] / (float)STRIDE_X);
        yi[0].s[i] = ceil(yu[0].s[i] / (float)STRIDE_Y);
    })

    // Initialize the accumulators
    TILE(ACC_DATA_TYPE, M0, N0, c);

    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c[i].v = 0;
    })

    // Flipped indices
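    // x_start/y_start are the largest weight indices that contribute to this output position,
    // paired with the source column/row (xi, yi). The loops below walk the weights backwards in
    // steps of the stride while advancing the source coordinates (xi_step/yi_step) by one,
    // which realizes the 180-degree weight flip of the transposed convolution.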
    const int x_start = _IWEI_WIDTH - (xi[0].s[0] * STRIDE_X - xu[0].s[0]) - 1;
    const int y_start = _IWEI_HEIGHT - (yi[0].s[0] * STRIDE_Y - yu[0].s[0]) - 1;

    for(int yk = y_start, yi_step = 0; yk >= 0; yk -= STRIDE_Y, ++yi_step)
    {
        for(int xk = x_start, xi_step = 0; xk >= 0; xk -= STRIDE_X, ++xi_step)
        {
            const int weights_y = cout * _IY_MULTIPLIER + yk * _IWEI_WIDTH + xk;

            TILE(int, 1, M0, my);

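            // my holds, for each of the M0 output positions, the linearized source row
            // (x_s + y_s * SRC_WIDTH plus the batch offset) to gather from. Out-of-bounds
            // positions are flagged with -1 so the indirect load below leaves the
            // ZERO_VALUE-initialized rows untouched (implicit padding).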
            LOOP_UNROLLING(int, i, 0, 1, M0,
            {
                int x_s    = xi[0].s[i] + xi_step;
                int y_s    = yi[0].s[i] + yi_step;
                my[0].s[i] = x_s + y_s * _ISRC_WIDTH;
                my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
                my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
                my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
                my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
                my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
            })

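            // Accumulate along the input channels in blocks of K0: a is an M0 x K0 tile of
            // source values, b is an N0 x K0 tile of weights, and c (M0 x N0) accumulates a * b^T.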
            int ck = 0;
            for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
            {
                TILE(SRC_DATA_TYPE, M0, K0, a);
                TILE(WEI_DATA_TYPE, N0, K0, b);

                // Initialize tiles
                LOOP_UNROLLING(int, i, 0, 1, M0,
                {
                    a[i].v = ZERO_VALUE;
                })

                LOOP_UNROLLING(int, i, 0, 1, N0,
                {
                    b[i].v = ZERO_VALUE;
                })

                // Load tile from the src tensor
                T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);

                // Load tile from the weights tensor
                T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, weights_y, _IY_MULTIPLIER, wei_stride_y, b);

                // Compute the matrix multiplication between two tiles
                T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);

#if defined(IS_QUANTIZED)
                // Apply the offset correction (correction usually needed for asymmetric quantized computation)
                // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
                T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, a, b, c);
#endif // defined(IS_QUANTIZED)
            }

            // This #if directive should be removed in case of dynamic tensor support
#if defined(LEFTOVER_LOOP)
            // Left-over accumulations
            for(; ck < _ISRC_CHANNELS; ++ck)
            {
                TILE(SRC_DATA_TYPE, M0, 1, a);
                TILE(WEI_DATA_TYPE, N0, 1, b);

                // Initialize tiles
                LOOP_UNROLLING(int, i, 0, 1, M0,
                {
                    a[i].v = ZERO_VALUE;
                })

                // Load tile from the src tensor
                // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
                T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, BUFFER, src, ck, src_stride_y, my, a);

                // Load tile from the weights tensor
                // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
                T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, weights_y, _IY_MULTIPLIER, wei_stride_y, b);

                // Compute the matrix multiplication between two tiles
                T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);

#if defined(IS_QUANTIZED)
                // Apply the offset correction (correction usually needed for asymmetric quantized computation)
                // The computation is not performed if both SRC_OFFSET and WEI_OFFSET are zero
                T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, 1, SRC_OFFSET, WEI_OFFSET, a, b, c);
#endif // defined(IS_QUANTIZED)
            }
#endif // defined(LEFTOVER_LOOP)
        }
    }

#if defined(IS_QUANTIZED)
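    // total_pixels is the number of weight taps (xk, yk) visited by the loops above for this
    // output position. The constant term of the asymmetric quantization expansion
    // (SRC_OFFSET * WEI_OFFSET per accumulation) is folded into the accumulators in one step.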
    const int total_pixels = floor(1 + y_start / (float)STRIDE_Y) * floor(1 + x_start / (float)STRIDE_X);

    T_ADD_CONSTANT(ACC_DATA_TYPE, M0, N0, c, (total_pixels * _ISRC_CHANNELS * SRC_OFFSET * WEI_OFFSET), c);
#endif // defined(IS_QUANTIZED)

#if defined(HAS_BIAS)
    TILE(BIA_DATA_TYPE, 1, N0, bias0);

    T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);

    // c = c + bias[broadcasted]
    T_ELTWISE_BROADCAST_ADD_X(ACC_DATA_TYPE, M0, N0, c, bias0, c);

#endif // defined(HAS_BIAS)

#if defined(IS_QUANTIZED)

    TILE(DST_DATA_TYPE, M0, N0, cq);

    // Quantize the tile
    T_QUANTIZE8_ASYMMETRIC(ACC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, c, cq);
#endif // defined(IS_QUANTIZED)

    TILE(uint, M0, 1, dst_indirect_y);

    // Calculate the destination indirect Y
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
        dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
    })

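    // Only the first work-item along the channel dimension owns the leftover block of output
    // channels (see GET_SPATIAL_IDX above), so only it takes the PARTIAL_N0 store path.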
    bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;

    // Store the tile in reverse order so the invalid values are overwritten with the valid ones
    T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, _IOUTPUT_TILE, dst_indirect_y);

#undef _IWEI_WIDTH
#undef _IWEI_HEIGHT
#undef _ISRC_WIDTH
#undef _ISRC_HEIGHT
#undef _ISRC_CHANNELS
#undef _IDST_WIDTH
#undef _IDST_HEIGHT
#undef _IDST_CHANNELS
#undef _IY_MULTIPLIER
}