xref: /aosp_15_r20/external/ComputeLibrary/src/core/CL/cl_kernels/helpers.h (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1 /*
2  * Copyright (c) 2016-2023 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_HELPER_H
25 #define ARM_COMPUTE_HELPER_H
26 
27 #include "load_store_utility.h"
28 
29 #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
30 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
31 #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
32 
33 #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
34 #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
35 #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
36 
37 #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
38 #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
39 #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
40 
41 #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
42 #pragma OPENCL EXTENSION cl_arm_printf : enable
43 #endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
44 
// Numeric identifiers for Arm(R) Mali(TM) GPU architecture generations.
// NOTE(review): presumably compared against a GPU_ARCH value injected as a kernel
// build option to select architecture-specific code paths — confirm with callers.
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
#define GPU_ARCH_VALHALL 0x300
48 
/** Concatenate two inputs.
 *
 * @note Arguments are pasted as-is, WITHOUT prior macro expansion. To paste the
 *       expansion of a macro argument, go through a two-level helper (see the
 *       *_STR wrappers used throughout this file).
 *
 * @param[in] a The first input to be concatenated
 * @param[in] b The second input to be concatenated
 *
 * @return The concatenated output
 */
#define CONCAT(a, b) a##b

/** Expand the given vector
 *
 * @param[in] x The vector to be expanded
 *
 * @return The expanded output
 */
#define EXPAND(x) x

/** Clamp the given value between an upper and lower bound.
 *
 * @note Each argument is evaluated exactly once; the bounds are applied with the
 *       OpenCL built-in min()/max(), so this works for both scalars and vectors.
 *
 * @param[in] x       The value to be clamped
 * @param[in] min_val The lower bound
 * @param[in] max_val The upper bound
 *
 * @return The clamped value.
 */
#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
75 
/** REVn reverses the given vector whose size is n.
 * @name REVn
 *
 * Implemented purely with OpenCL swizzle selectors, so the reversal costs no
 * extra loads or stores.
 *
 * @param[in] x The vector to be reversed
 *
 * @return The reversed vector
 * @{
 */
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn

/** Reverse the given vector.
 * @name REVERSE
 *
 * @param[in] x The vector to be reversed
 * @param[in] s The size of the vector. Supported values: 1, 2, 3, 4, 8, 16
 *
 * @return The reversed vector
 * @{
 */
// Two-level macro so that @p s is fully expanded before being pasted onto REV.
#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
104 
/** Circular-right-shift (rotate-right) the vector of size s by the amount of n.
 * @name ROTs_n
 *
 * Implemented purely with swizzle selectors. Rotating by 0 or by the full vector
 * width is the identity, hence the plain ((x)) entries at both ends of each table.
 *
 * @param[in] x The vector to be shifted
 *
 * @return The shifted vector
 * @{
 */
#define ROT1_0(x) ((x))
#define ROT1_1(x) ((x))

#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT2_2(x) ((x))

#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT3_3(x) ((x))

#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT4_4(x) ((x))

#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT8_8(x) ((x))

#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
#define ROT16_16(x) ((x))
/** @} */ // end of group ROTs_n

/** Circular-right-shift (rotate-right) the given vector by the given amount.
 * @name ROTATE
 *
 * @param[in] x The vector to be shifted
 * @param[in] s The size of the vector. Supported values: 1, 2, 3, 4, 8, 16
 * @param[in] n The amount to be shifted. Supported values: 0 <= n <= s
 *
 * @return The shifted vector
 * @{
 */
// Two-level macro so that @p s and @p n are fully expanded before token pasting.
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
173 
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
 * @name V_OFFSn
 *
 * e.g. V_OFFS4(int) expands to (int4)(0, 1, 2, 3).
 *
 * @param[in] dt The data type of the output vector
 *
 * @return The vector filled with offset values
 * @{
 */
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn

/** Create a vector filled with offset values corresponding to the location of each element.
 * @name VEC_OFFS
 *
 * @param[in] dt The data type of the output vector
 * @param[in] s  The size of the output vector. Supported values: 1, 2, 3, 4, 8, 16
 *
 * @return The vector filled with offset values
 * @{
 */
// Two-level macro so that @p s is fully expanded before token pasting.
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS

// Build the OpenCL vloadN built-in name from a (possibly macro) vector size,
// e.g. VLOAD(4) -> vload4. vload1 is provided further below for the scalar case.
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)
205 
/** Extended partial vload that correctly handles scalar values as well.
 * Load the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of load ops
 * @name VLOAD_PARTIAL
 *
 * @note With this macro, the passed data can be both a vector and a scalar
 * @note @p load_size needs to be <= @p size
 * eg 1: Valid
 * VLOAD_PARTIAL(16, 15) ...;
 * eg 2: Invalid
 * VLOAD_PARTIAL(4, 7) ...;
 *
 * @param[in] size      The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
 * @param[in] load_size The number of lower elements to load. Supported values: 1-16, but has to be <= @p size
 * @{
 */
// Two-level macro: dispatches to one of the vload_partial_<size>_<load_size>
// table entries below, which in turn alias the vload_partial_<n> implementations.
#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size
#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size)

// Expands to an empty block: used for load_size == 0 and for the invalid
// combinations (load_size > size), which therefore compile to nothing.
#define NO_LOAD(data, offs, ptr) \
    {                            \
    }

// Dispatch tables: vload_partial_<size>_<load_size>. Entries with
// load_size > size are invalid and map to NO_LOAD.
// Size == 1 (scalar)
#define vload_partial_1_0 NO_LOAD
#define vload_partial_1_1 vload1
#define vload_partial_1_2 NO_LOAD
#define vload_partial_1_3 NO_LOAD
#define vload_partial_1_4 NO_LOAD
#define vload_partial_1_5 NO_LOAD
#define vload_partial_1_6 NO_LOAD
#define vload_partial_1_7 NO_LOAD
#define vload_partial_1_8 NO_LOAD
#define vload_partial_1_9 NO_LOAD
#define vload_partial_1_10 NO_LOAD
#define vload_partial_1_11 NO_LOAD
#define vload_partial_1_12 NO_LOAD
#define vload_partial_1_13 NO_LOAD
#define vload_partial_1_14 NO_LOAD
#define vload_partial_1_15 NO_LOAD
#define vload_partial_1_16 NO_LOAD
// Size == 2
#define vload_partial_2_0 NO_LOAD
#define vload_partial_2_1 vload_partial_1
#define vload_partial_2_2 vload_partial_2
#define vload_partial_2_3 NO_LOAD
#define vload_partial_2_4 NO_LOAD
#define vload_partial_2_5 NO_LOAD
#define vload_partial_2_6 NO_LOAD
#define vload_partial_2_7 NO_LOAD
#define vload_partial_2_8 NO_LOAD
#define vload_partial_2_9 NO_LOAD
#define vload_partial_2_10 NO_LOAD
#define vload_partial_2_11 NO_LOAD
#define vload_partial_2_12 NO_LOAD
#define vload_partial_2_13 NO_LOAD
#define vload_partial_2_14 NO_LOAD
#define vload_partial_2_15 NO_LOAD
#define vload_partial_2_16 NO_LOAD
// Size == 3
#define vload_partial_3_0 NO_LOAD
#define vload_partial_3_1 vload_partial_1
#define vload_partial_3_2 vload_partial_2
#define vload_partial_3_3 vload_partial_3
#define vload_partial_3_4 NO_LOAD
#define vload_partial_3_5 NO_LOAD
#define vload_partial_3_6 NO_LOAD
#define vload_partial_3_7 NO_LOAD
#define vload_partial_3_8 NO_LOAD
#define vload_partial_3_9 NO_LOAD
#define vload_partial_3_10 NO_LOAD
#define vload_partial_3_11 NO_LOAD
#define vload_partial_3_12 NO_LOAD
#define vload_partial_3_13 NO_LOAD
#define vload_partial_3_14 NO_LOAD
#define vload_partial_3_15 NO_LOAD
#define vload_partial_3_16 NO_LOAD
// Size == 4
#define vload_partial_4_0 NO_LOAD
#define vload_partial_4_1 vload_partial_1
#define vload_partial_4_2 vload_partial_2
#define vload_partial_4_3 vload_partial_3
#define vload_partial_4_4 vload_partial_4
#define vload_partial_4_5 NO_LOAD
#define vload_partial_4_6 NO_LOAD
#define vload_partial_4_7 NO_LOAD
#define vload_partial_4_8 NO_LOAD
#define vload_partial_4_9 NO_LOAD
#define vload_partial_4_10 NO_LOAD
#define vload_partial_4_11 NO_LOAD
#define vload_partial_4_12 NO_LOAD
#define vload_partial_4_13 NO_LOAD
#define vload_partial_4_14 NO_LOAD
#define vload_partial_4_15 NO_LOAD
#define vload_partial_4_16 NO_LOAD
// Size == 8
#define vload_partial_8_0 NO_LOAD
#define vload_partial_8_1 vload_partial_1
#define vload_partial_8_2 vload_partial_2
#define vload_partial_8_3 vload_partial_3
#define vload_partial_8_4 vload_partial_4
#define vload_partial_8_5 vload_partial_5
#define vload_partial_8_6 vload_partial_6
#define vload_partial_8_7 vload_partial_7
#define vload_partial_8_8 vload_partial_8
#define vload_partial_8_9 NO_LOAD
#define vload_partial_8_10 NO_LOAD
#define vload_partial_8_11 NO_LOAD
#define vload_partial_8_12 NO_LOAD
#define vload_partial_8_13 NO_LOAD
#define vload_partial_8_14 NO_LOAD
#define vload_partial_8_15 NO_LOAD
#define vload_partial_8_16 NO_LOAD
// Size == 16
#define vload_partial_16_0 NO_LOAD
#define vload_partial_16_1 vload_partial_1
#define vload_partial_16_2 vload_partial_2
#define vload_partial_16_3 vload_partial_3
#define vload_partial_16_4 vload_partial_4
#define vload_partial_16_5 vload_partial_5
#define vload_partial_16_6 vload_partial_6
#define vload_partial_16_7 vload_partial_7
#define vload_partial_16_8 vload_partial_8
#define vload_partial_16_9 vload_partial_9
#define vload_partial_16_10 vload_partial_10
#define vload_partial_16_11 vload_partial_11
#define vload_partial_16_12 vload_partial_12
#define vload_partial_16_13 vload_partial_13
#define vload_partial_16_14 vload_partial_14
#define vload_partial_16_15 vload_partial_15
#define vload_partial_16_16 vload_partial_16
336 
/** Partial vload. Load the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vload ops
 * @name vload_partial_n
 *
 * @note @p DATA needs to be a vector not a scalar
 * @note n needs to be <= the vector width of the input variable @p DATA
 * eg 1: Valid
 * vload_partial_15(var:float16, 0, 0xabcd);
 * eg 2: Invalid
 * vload_partial_7(var:float4, 0, 0xabcd);
 *
 * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vload is invoked, thus there's no performance penalty.
 *
 * @param[in] DATA   The name of the variable where to load the values
 * @param[in] OFFSET Offset in n
 * @param[in] PTR    The base pointer
 * @{
 */
// Sizes with a native vloadN (1, 2, 3, 4, 8, 16) map to a single load; the
// remaining sizes are decomposed into a power-of-two load plus a smaller
// partial load at PTR + <elements already loaded>.
#define vload_partial_1(DATA, OFFSET, PTR) \
    DATA.s0 = vload1(OFFSET, PTR);

#define vload_partial_2(DATA, OFFSET, PTR) \
    DATA.s01 = vload2(OFFSET, PTR);

#define vload_partial_3(DATA, OFFSET, PTR) \
    DATA.s012 = vload3(OFFSET, PTR);

#define vload_partial_4(DATA, OFFSET, PTR) \
    DATA.s0123 = vload4(OFFSET, PTR);

#define vload_partial_5(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    DATA.s4 = vload1(OFFSET, PTR + 4);

#define vload_partial_6(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vload_partial_7(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vload_partial_8(DATA, OFFSET, PTR) \
    DATA.s01234567 = vload8(OFFSET, PTR);

#define vload_partial_9(DATA, OFFSET, PTR)        \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    DATA.s8 = vload1(OFFSET, PTR + 8);

#define vload_partial_10(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vload_partial_11(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_3(DATA.s89A, OFFSET, PTR + 8);

#define vload_partial_12(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);
// For vload_partial_{13,14,15}, an 8-vector size has been passed, because vectors size of size 5,6,7 are not supported
#define vload_partial_13(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_14(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_15(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_16(DATA, OFFSET, PTR) \
    DATA = vload16(OFFSET, PTR);
/** @} */ // end of group vload_partial_n
/** @} */ // end of group VLOAD_PARTIAL
413 
// Number of image pixels covered by a vector of the given element count.
// One RGBA pixel carries 4 elements, hence pixel unit = vec_size / 4.
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4

/** Utility macro to convert a vector size in pixel unit.
 *
 * @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
 *
 * @param[in] vec_size Vector size. Only 4,8 and 16 is supported
 *
 * @return The pixel unit (number of pixels)
 * @{
 */
// Two-level macro so that @p vec_size is fully expanded before token pasting.
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
429 /** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
430 
// Read 1/2/4 horizontally-adjacent RGBA pixels from a 2D image and pack them
// into a 4/8/16-wide vector. Note: the trailing ';' is part of the expansion.
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));

// Half-precision variants are only available when fp16 support is compiled in.
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)

// Write a 4/8/16-wide vector as 1/2/4 horizontally-adjacent RGBA pixels.
#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values));
#define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values));
#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)

/** Utility macro to read a 2D OpenCL image object.
 *
 * @note Coordinates are not normalized
 *
 * @param[in] data_type Data type. Supported values: float, half (half only when fp16 is enabled)
 * @param[in] n0        Number of pixel to read. Only 1,2 and 4 is supported
 * @param[in] img       OpenCL image object
 * @param[in] x_coord   The x coordinate for the top-left pixel
 * @param[in] y_coord   The y coordinate for the top-left pixel
 *
 * @return Pixels from the 2D OpenCL image object
 * @{
 */
// Two-level macro so that @p data_type and @p n0 expand before token pasting.
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)

/** Utility macro to write a 2D OpenCL image object.
 *
 * @note Coordinates are not normalized
 *
 * @param[in] data_type Data type. Supported values: float, half (half only when fp16 is enabled)
 * @param[in] n0        Number of pixel to write. Only 1,2 and 4 is supported
 * @param[in] img       OpenCL image object
 * @param[in] x_coord   The x coordinate for the top-left pixel
 * @param[in] y_coord   The y coordinate for the top-left pixel
 * @param[in] values    Values to write
 *
 * @{
 */
#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values)
#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values)
482 
// Build the OpenCL vstoreN built-in name from a (possibly macro) vector size,
// e.g. VSTORE(8) -> vstore8. vstore1 is provided below for the scalar case.
#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)

// Size-1 "vector" type aliases so that VEC_DATA_TYPE(type, 1) and the tN##1
// token-pasting patterns resolve to plain scalar types.
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double

// Scalar analogues of vloadN/vstoreN: for N == 1, the load/store is a plain
// dereference at PTR + OFFSET (element offset, matching the vloadN convention).
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
500 
/** Extended partial vstore that correctly handles scalar values as well.
 * Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops
 * @name VSTORE_PARTIAL
 *
 * @note With this macro, the passed data can be both a vector and a scalar
 * @note @p store_size needs to be <= @p size
 * eg 1: Valid
 * VSTORE_PARTIAL(16, 15) ...;
 * eg 2: Invalid
 * VSTORE_PARTIAL(4, 7) ...;
 *
 * @param[in] size       The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
 * @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
 * @{
 */
// Two-level macro: dispatches to one of the vstore_partial_<size>_<store_size>
// table entries below, which in turn alias the vstore_partial_<n> implementations.
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)

// Expands to an empty block: used for store_size == 0 and for the invalid
// combinations (store_size > size), which therefore compile to nothing.
#define NO_STORE(data, offs, ptr) \
    {                             \
    }

// Dispatch tables: vstore_partial_<size>_<store_size>. Entries with
// store_size > size are invalid and map to NO_STORE.
// Size == 1 (scalar)
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
631 
/** Partial vstore. Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops
 * @name vstore_partial_n
 *
 * @note @p DATA needs to be a vector not a scalar
 * @note n needs to be <= the vector width of the input variable @p DATA
 * eg 1: Valid
 * vstore_partial_15(var:float16, 0, 0xabcd);
 * eg 2: Invalid
 * vstore_partial_7(var:float4, 0, 0xabcd);
 *
 * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty.
 *
 * @param[in] DATA   The name of the variable
 * @param[in] OFFSET Offset in n
 * @param[in] PTR    The base pointer
 * @{
 */
// Sizes with a native vstoreN (1, 2, 3, 4, 8, 16) map to a single store; the
// remaining sizes are decomposed into a power-of-two store plus a smaller
// partial store at PTR + <elements already stored>.
#define vstore_partial_1(DATA, OFFSET, PTR) \
    vstore1(DATA.s0, OFFSET, PTR);

#define vstore_partial_2(DATA, OFFSET, PTR) \
    vstore2(DATA.s01, OFFSET, PTR);

#define vstore_partial_3(DATA, OFFSET, PTR) \
    vstore3(DATA.s012, OFFSET, PTR);

#define vstore_partial_4(DATA, OFFSET, PTR) \
    vstore4(DATA.s0123, OFFSET, PTR);

#define vstore_partial_5(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore1(DATA.s4, OFFSET, PTR + 4);

#define vstore_partial_6(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vstore_partial_7(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vstore_partial_8(DATA, OFFSET, PTR) \
    vstore8(DATA.s01234567, OFFSET, PTR);

#define vstore_partial_9(DATA, OFFSET, PTR)        \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore1(DATA.s8, OFFSET, PTR + 8);

#define vstore_partial_10(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vstore_partial_11(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);

#define vstore_partial_12(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);

// As with vload_partial_{13,14,15}: an 8-wide swizzle is passed through because
// 5/6/7-wide vector types do not exist; the callee only touches the lower lanes.
#define vstore_partial_13(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_14(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_15(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_16(DATA, OFFSET, PTR) \
    vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
708 
// Convert built-in functions with _sat modifier are not supported in floating point so we create defines
// without _sat to overcome this issue
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
// Fix: this previously aliased convert_float, which is inconsistent with every
// other convert_half*_sat entry below and would make a scalar half saturating
// conversion produce a float instead of a half.
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
725 
// Size-1 convert aliases so that CONVERT(x, type##1) resolves to the scalar
// convert built-in of the same base type.
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double

// Size-1 saturating-convert aliases, same idea as above.
// NOTE(review): the convert_uchar{2,3,4,8,16}_sat entries define a name as
// itself; the preprocessor does not re-expand a macro inside its own
// replacement, so these resolve to the OpenCL built-ins of the same name and
// are effectively no-ops kept for table uniformity.
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_uchar2_sat convert_uchar2_sat
#define convert_uchar3_sat convert_uchar3_sat
#define convert_uchar4_sat convert_uchar4_sat
#define convert_uchar8_sat convert_uchar8_sat
#define convert_uchar16_sat convert_uchar16_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
752 
// Build a vector data type name from a base type and a size, e.g.
// VEC_DATA_TYPE(float, 4) -> float4 (size 1 resolves via the type##1 aliases).
// Two-level macros throughout so the arguments expand before token pasting.
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)

// Non-saturating conversion: CONVERT(x, int4) -> convert_int4(x).
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)

// Saturating conversion: CONVERT_SAT(x, uchar4) -> convert_uchar4_sat(x).
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)

// Saturating conversion with explicit rounding mode, e.g. round == rte:
// CONVERT_SAT_ROUND(x, int4, rte) -> convert_int4_sat_rte(x).
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
764 
/* Integer vector type of the same bit width as the given data type, suitable
 * for the result of OpenCL select()/relational operators (half -> short,
 * float -> int; integer types map to themselves). */
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size

/* SELECT_VEC_DATA_TYPE(float, 4) -> int4; SELECT_DATA_TYPE(float) -> int1. */
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
779 
/* Signed integer vector type of the same bit width as the given data type
 * (uchar -> char, half -> short, float -> int, ulong -> long, ...). */
#define signed_int_vec_dt_uchar(size) char##size
#define signed_int_vec_dt_char(size) char##size
#define signed_int_vec_dt_ushort(size) short##size
#define signed_int_vec_dt_short(size) short##size
#define signed_int_vec_dt_half(size) short##size
#define signed_int_vec_dt_uint(size) int##size
#define signed_int_vec_dt_int(size) int##size
#define signed_int_vec_dt_float(size) int##size
#define signed_int_vec_dt_ulong(size) long##size
#define signed_int_vec_dt_long(size) long##size

/* SIGNED_INT_VEC_DATA_TYPE(uchar, 16) -> char16; *_DATA_TYPE is size 1. */
#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)
794 
/* Horizontal sum-reduction of a vector to a scalar via pairwise halving;
 * the 3-element case is a 2-reduce plus the .s2 lane.
 * Each expansion is now fully parenthesized so the result can be embedded
 * safely in a larger expression (previously `sum_reduce_2(v) * k` expanded
 * to `(v).s0 + (v).s1 * k`, binding k to the last lane only). */
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) (((x).s0) + ((x).s1))
#define sum_reduce_3(x) (sum_reduce_2((x).s01) + ((x).s2))
#define sum_reduce_4(x) (sum_reduce_2((x).s01) + sum_reduce_2((x).s23))
#define sum_reduce_8(x) (sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567))
#define sum_reduce_16(x) (sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF))

/* SUM_REDUCE(x, size) dispatches to sum_reduce_<size>; the _STR layer forces
 * macro expansion of `size` first. */
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
804 
/* Horizontal product-reduction of a vector to a scalar via pairwise halving;
 * the 3-element case is a 2-reduce times the .s2 lane.
 * Each expansion is now fully parenthesized so the result can be embedded
 * safely in a larger expression (previously `k / prod_reduce_2(v)` expanded
 * to `k / (v).s0 * (v).s1`, associating left-to-right). */
#define prod_reduce_1(x) (x)
#define prod_reduce_2(x) (((x).s0) * ((x).s1))
#define prod_reduce_3(x) (prod_reduce_2((x).s01) * ((x).s2))
#define prod_reduce_4(x) (prod_reduce_2((x).s01) * prod_reduce_2((x).s23))
#define prod_reduce_8(x) (prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567))
#define prod_reduce_16(x) (prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF))

/* PROD_REDUCE(x, size) dispatches to prod_reduce_<size>. */
#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)
814 
/* Horizontal max-reduction of a vector to a scalar via pairwise max() calls;
 * the 3-element case is a 2-reduce combined with the .s2 lane. Expansions are
 * function calls, so no extra parenthesization is needed. */
#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))

/* MAX_REDUCE(x, size) dispatches to max_reduce_<size>. */
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
824 
/* Expand to the kernel-argument list describing a 1D buffer `name`:
 * data pointer, per-dimension byte stride and per-workitem byte step,
 * and the byte offset of the first element. Used together with the
 * CONVERT_TO_*_STRUCT macros below. */
#define VECTOR_DECLARATION(name)     \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_offset_first_element_in_bytes

/* Kernel-argument list for a 2D buffer (X/Y strides and steps). */
#define IMAGE_DECLARATION(name)      \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_offset_first_element_in_bytes

/* Kernel-argument list for a 3D buffer (X/Y/Z strides and steps). */
#define TENSOR3D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_stride_z, \
    uint        name##_step_z,   \
    uint        name##_offset_first_element_in_bytes

/* Kernel-argument list for a 4D buffer (X/Y/Z/W strides and steps). */
#define TENSOR4D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_stride_z, \
    uint        name##_step_z,   \
    uint        name##_stride_w, \
    uint        name##_step_w,   \
    uint        name##_offset_first_element_in_bytes

/* Kernel-argument list for a 5D buffer (X/Y/Z/W/V strides and steps). */
#define TENSOR5D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_stride_z, \
    uint        name##_step_z,   \
    uint        name##_stride_w, \
    uint        name##_step_w,   \
    uint        name##_stride_v, \
    uint        name##_step_v,   \
    uint        name##_offset_first_element_in_bytes
874 
/* Build a Vector/Image/Tensor3D/Tensor4D struct from the kernel arguments
 * declared by the matching *_DECLARATION(name) macro, with the pointer
 * advanced to this workitem's data. The *_NO_STEP variants pass 0 steps so
 * the pointer is not displaced per workitem. */
#define CONVERT_TO_VECTOR_STRUCT(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)

#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)

#define CONVERT_TO_IMAGE_STRUCT(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)

#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)

/* This macro was defined twice with identical bodies in the original file;
 * the redundant second definition has been removed. */
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)

/* NOTE(review): unlike the other *_NO_STEP helpers, this zeroes only the X/Y
 * steps and still applies name##_step_z. Kept as-is since existing kernels
 * may rely on the Z displacement - confirm before "fixing". */
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT(name)                                                                                                           \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)

/* mod_size is the depth of the collapsed 3rd dispatch dimension:
 * get_global_id(2) is split into z = gid2 % mod_size and w = gid2 / mod_size. */
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size)                                                                                                 \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)

#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)

/* Wraps the tensor metadata WITHOUT advancing the pointer per workitem. */
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name)                                                                                       \
    tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                           name##_stride_z, name##_step_z)
913 
/** Structure to hold Vector information */
typedef struct Vector
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< The offset of the first element in the source vector (in bytes) */
    int             stride_x;                      /**< Stride of the vector in X dimension (in bytes) */
} Vector;
921 
/** Structure to hold Image information */
typedef struct Image
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< The offset of the first element in the source image (in bytes) */
    int             stride_x;                      /**< Stride of the image in X dimension (in bytes) */
    int             stride_y;                      /**< Stride of the image in Y dimension (in bytes) */
} Image;
930 
/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< The offset of the first element in the source tensor (in bytes) */
    int             stride_x;                      /**< Stride of the tensor in X dimension (in bytes) */
    int             stride_y;                      /**< Stride of the tensor in Y dimension (in bytes) */
    int             stride_z;                      /**< Stride of the tensor in Z dimension (in bytes) */
} Tensor3D;
940 
/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
    int             offset_first_element_in_bytes; /**< The offset of the first element in the source tensor (in bytes) */
    int             stride_x;                      /**< Stride of the tensor in X dimension (in bytes) */
    int             stride_y;                      /**< Stride of the tensor in Y dimension (in bytes) */
    int             stride_z;                      /**< Stride of the tensor in Z dimension (in bytes) */
    int             stride_w;                      /**< Stride of the tensor in W dimension (in bytes) */
} Tensor4D;
951 
952 /** Wrap vector information into an Vector structure, and make the pointer point at this workitem's data.
953  *
954  * @param[in] ptr                           Pointer to the starting postion of the buffer
955  * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
956  * @param[in] stride_x                      Stride of the vector in X dimension (in bytes)
957  * @param[in] step_x                        stride_x * number of elements along X processed per workitem(in bytes)
958  *
959  * @return An image object
960  */
update_vector_workitem_ptr(__global uchar * ptr,uint offset_first_element_in_bytes,uint stride_x,uint step_x)961 inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
962 {
963     Vector vector =
964     {
965         .ptr                           = ptr,
966         .offset_first_element_in_bytes = offset_first_element_in_bytes,
967         .stride_x                      = stride_x,
968     };
969     vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
970     return vector;
971 }
972 
973 /** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
974  *
975  * @param[in] ptr                           Pointer to the starting postion of the buffer
976  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
977  * @param[in] stride_x                      Stride of the image in X dimension (in bytes)
978  * @param[in] step_x                        stride_x * number of elements along X processed per workitem(in bytes)
979  * @param[in] stride_y                      Stride of the image in Y dimension (in bytes)
980  * @param[in] step_y                        stride_y * number of elements along Y processed per workitem(in bytes)
981  *
982  * @return An image object
983  */
update_image_workitem_ptr(__global uchar * ptr,uint offset_first_element_in_bytes,uint stride_x,uint step_x,uint stride_y,uint step_y)984 inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
985 {
986     Image img =
987     {
988         .ptr                           = ptr,
989         .offset_first_element_in_bytes = offset_first_element_in_bytes,
990         .stride_x                      = stride_x,
991         .stride_y                      = stride_y
992     };
993     img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
994     return img;
995 }
996 
997 /** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
998  *
999  * @param[in] ptr                           Pointer to the starting postion of the buffer
1000  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
1001  * @param[in] stride_x                      Stride of the image in X dimension (in bytes)
1002  * @param[in] step_x                        stride_x * number of elements along X processed per workitem(in bytes)
1003  * @param[in] stride_y                      Stride of the image in Y dimension (in bytes)
1004  * @param[in] step_y                        stride_y * number of elements along Y processed per workitem(in bytes)
1005  * @param[in] stride_z                      Stride of the image in Z dimension (in bytes)
1006  * @param[in] step_z                        stride_z * number of elements along Z processed per workitem(in bytes)
1007  *
1008  * @return A 3D tensor object
1009  */
update_image_from_tensor3D_workitem_ptr(__global uchar * ptr,uint offset_first_element_in_bytes,uint stride_x,uint step_x,uint stride_y,uint step_y,uint stride_z,uint step_z)1010 inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1011 {
1012     Image img =
1013     {
1014         .ptr                           = ptr,
1015         .offset_first_element_in_bytes = offset_first_element_in_bytes,
1016         .stride_x                      = stride_x,
1017         .stride_y                      = stride_y
1018     };
1019     img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
1020     return img;
1021 }
1022 
1023 /** Wrap 3D tensor information into an tensor structure, and make the pointer point at this workitem's data.
1024  *
1025  * @param[in] ptr                           Pointer to the starting postion of the buffer
1026  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
1027  * @param[in] stride_x                      Stride of the image in X dimension (in bytes)
1028  * @param[in] step_x                        stride_x * number of elements along X processed per workitem(in bytes)
1029  * @param[in] stride_y                      Stride of the image in Y dimension (in bytes)
1030  * @param[in] step_y                        stride_y * number of elements along Y processed per workitem(in bytes)
1031  * @param[in] stride_z                      Stride of the image in Z dimension (in bytes)
1032  * @param[in] step_z                        stride_z * number of elements along Z processed per workitem(in bytes)
1033  *
1034  * @return A 3D tensor object
1035  */
update_tensor3D_workitem_ptr(__global uchar * ptr,uint offset_first_element_in_bytes,uint stride_x,uint step_x,uint stride_y,uint step_y,uint stride_z,uint step_z)1036 inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1037 {
1038     Tensor3D tensor =
1039     {
1040         .ptr                           = ptr,
1041         .offset_first_element_in_bytes = offset_first_element_in_bytes,
1042         .stride_x                      = stride_x,
1043         .stride_y                      = stride_y,
1044         .stride_z                      = stride_z
1045     };
1046     tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
1047     return tensor;
1048 }
1049 
1050 /** Wrap 3D tensor information into an tensor structure.
1051  *
1052  * @param[in] ptr                           Pointer to the starting postion of the buffer
1053  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
1054  * @param[in] stride_x                      Stride of the image in X dimension (in bytes)
1055  * @param[in] step_x                        stride_x * number of elements along X processed per workitem(in bytes)
1056  * @param[in] stride_y                      Stride of the image in Y dimension (in bytes)
1057  * @param[in] step_y                        stride_y * number of elements along Y processed per workitem(in bytes)
1058  * @param[in] stride_z                      Stride of the image in Z dimension (in bytes)
1059  * @param[in] step_z                        stride_z * number of elements along Z processed per workitem(in bytes)
1060  *
1061  * @return A 3D tensor object
1062  */
tensor3D_ptr_no_update(__global uchar * ptr,uint offset_first_element_in_bytes,uint stride_x,uint step_x,uint stride_y,uint step_y,uint stride_z,uint step_z)1063 inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1064 {
1065     Tensor3D tensor =
1066     {
1067         .ptr                           = ptr,
1068         .offset_first_element_in_bytes = offset_first_element_in_bytes,
1069         .stride_x                      = stride_x,
1070         .stride_y                      = stride_y,
1071         .stride_z                      = stride_z
1072     };
1073     return tensor;
1074 }
1075 
update_tensor4D_workitem_ptr(__global uchar * ptr,uint offset_first_element_in_bytes,uint stride_x,uint step_x,uint stride_y,uint step_y,uint stride_z,uint step_z,uint stride_w,uint step_w,uint mod_size)1076 inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
1077                                              uint step_w,
1078                                              uint mod_size)
1079 {
1080     Tensor4D tensor =
1081     {
1082         .ptr                           = ptr,
1083         .offset_first_element_in_bytes = offset_first_element_in_bytes,
1084         .stride_x                      = stride_x,
1085         .stride_y                      = stride_y,
1086         .stride_z                      = stride_z,
1087         .stride_w                      = stride_w
1088     };
1089 
1090     tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
1091     return tensor;
1092 }
1093 
1094 /** Get the pointer position of a Vector
1095  *
1096  * @param[in] vec Pointer to the starting position of the buffer
1097  * @param[in] x   Relative X position
1098  */
vector_offset(const Vector * vec,int x)1099 inline __global const uchar *vector_offset(const Vector *vec, int x)
1100 {
1101     return vec->ptr + x * vec->stride_x;
1102 }
1103 
1104 /** Get the pointer position of a Image
1105  *
1106  * @param[in] img Pointer to the starting position of the buffer
1107  * @param[in] x   Relative X position
1108  * @param[in] y   Relative Y position
1109  */
offset(const Image * img,int x,int y)1110 inline __global uchar *offset(const Image *img, int x, int y)
1111 {
1112     return img->ptr + x * img->stride_x + y * img->stride_y;
1113 }
1114 
1115 /** Get the pointer position of a Tensor3D
1116  *
1117  * @param[in] tensor Pointer to the starting position of the buffer
1118  * @param[in] x      Relative X position
1119  * @param[in] y      Relative Y position
1120  * @param[in] z      Relative Z position
1121  */
tensor3D_offset(const Tensor3D * tensor,int x,int y,int z)1122 inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
1123 {
1124     return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
1125 }
1126 
1127 /** Get the pointer position of a Tensor4D
1128  *
1129  * @param[in] tensor Pointer to the starting position of the buffer
1130  * @param[in] x      Relative X position
1131  * @param[in] y      Relative Y position
1132  * @param[in] z      Relative Z position
1133  * @param[in] w      Relative W position
1134  */
tensor4D_offset(const Tensor4D * tensor,int x,int y,int z,int w)1135 inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
1136 {
1137     return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
1138 }
1139 
1140 /** Get the offset for a given linear index of a Tensor3D
1141  *
1142  * @param[in] tensor Pointer to the starting position of the buffer
1143  * @param[in] width  Width of the input tensor
1144  * @param[in] height Height of the input tensor
1145  * @param[in] depth  Depth of the input tensor
1146  * @param[in] index  Linear index
1147  */
tensor3D_index2ptr(const Tensor3D * tensor,uint width,uint height,uint depth,uint index)1148 inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
1149 {
1150     uint num_elements = width * height;
1151 
1152     const uint z = index / num_elements;
1153 
1154     index %= num_elements;
1155 
1156     const uint y = index / width;
1157 
1158     index %= width;
1159 
1160     const uint x = index;
1161 
1162     return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
1163 }
1164 
#endif // ARM_COMPUTE_HELPER_H
1166