xref: /aosp_15_r20/external/ComputeLibrary/cl_kernels/common/bounding_box_transform_quantized.clembed (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1R"(
2
3#ifndef ARM_COMPUTE_HELPERS_ASYMM_H
4#define ARM_COMPUTE_HELPERS_ASYMM_H
5
6
7#ifndef ARM_COMPUTE_HELPER_H
8#define ARM_COMPUTE_HELPER_H
9
10
11
12
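// STORE_ROW_n: store the n row vectors BASENAME##0..BASENAME##(n-1) to memory, one VSTORE of N0 elements per row.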
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0)                                                 \
    (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                 \
    (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)      \
    VSTORE(N0)                                                  \
    (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                  \
    (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));



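// CONVERT_STORE_ROW_n: as STORE_ROW_n, but each row is saturation-converted to DATA_TYPE before being stored.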
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                         \
    (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0)                                                     \
    (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE(N0)                                                          \
    (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));



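// STORE_BLOCK / CONVERT_STORE_BLOCK: dispatch an M0 x N0 block store to the matching STORE_ROW_M0 / CONVERT_STORE_ROW_M0 macro.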
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)



#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)


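// STORE_ROW_PARTIAL_n: store n rows, writing only the first STORE_N0 of the N0 elements in each row.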
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                 \
    (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)      \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)     \
    VSTORE_PARTIAL(N0, STORE_N0)                                                  \
    (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));


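// STORE_BLOCK_PARTIAL and the *_IN_X / *_IN_Y / *_IN_X_AND_Y variants: block stores that fall back to
// partial row/column counts at run time when the PARTIAL_COND_X / PARTIAL_COND_Y predicates are true.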
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y))                                                                                                            \
    {                                                                                                                                                     \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                                                                           \
    }                                                                                                                                                     \
    else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X))                                                                                                        \
    {                                                                                                                                                     \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                                                             \
    }                                                                                                                                                     \
    else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X))                                                                                                        \
    {                                                                                                                                                     \
        STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                                                             \
    }                                                                                                                                                     \
    else                                                                                                                                                  \
    {                                                                                                                                                     \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                                               \
    }

#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
    if(!(PARTIAL_COND_X))                                                                                         \
    {                                                                                                             \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                                   \
    }                                                                                                             \
    else                                                                                                          \
    {                                                                                                             \
        STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                     \
    }

#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
    if(!(PARTIAL_COND_Y))                                                                                         \
    {                                                                                                             \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                                   \
    }                                                                                                             \
    else                                                                                                          \
    {                                                                                                             \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                     \
    }


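// STORE_BLOCK_BOUNDARY_AWARE: compile-time selection of the cheapest store, depending on whether the
// output is a multiple of the block size in Y (PARTIAL_STORE_M0 == 0) and/or in X (PARTIAL_STORE_N0 == 0).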
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)


#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)

#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)

#else

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)

#endif

#endif


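// COMPUTE_M0_START_ROW: first row processed by work-item y; when there are leftover rows, blocks after
// the first are shifted up by (M0 - PARTIAL_STORE_M0) % M0 so the last block does not run out of bounds.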
#if defined(PARTIAL_STORE_M0)

#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
    ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
    ((uint)(y * M0))
#endif


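// STORE_VECTOR_SELECT: store one row of vec_size elements, switching to a leftover-sized partial store when cond is true.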
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
    STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)

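// Enable optional OpenCL extensions only when both the build-time flag and the device extension are present.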
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif

#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif

#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif

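// Arm Mali GPU architecture identifiers.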
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
#define GPU_ARCH_VALHALL 0x300


#define CONCAT(a, b) a##b


#define EXPAND(x) x


#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)


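// REVn: reverse the components of an n-element vector.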
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)



#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)


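// ROTs_n: rotate the components of an s-element vector right by n positions.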
#define ROT1_0(x) ((x))
#define ROT1_1(x) ((x))

#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT2_2(x) ((x))

#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT3_3(x) ((x))

#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT4_4(x) ((x))

#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT8_8(x) ((x))

#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
#define ROT16_16(x) ((x))



#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)


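// V_OFFSn / VEC_OFFS: an n-element vector of the sequential offsets 0, 1, ..., n-1 in the given data type.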
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)



#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)


#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)

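// VLOAD_PARTIAL(size, load_size) expands to vload_partial_<size>_<load_size>; combinations that load
// nothing or more elements than the vector holds map to NO_LOAD, which expands to an empty block.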
#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size
#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size)

#define NO_LOAD(data, offs, ptr) \
    {                            \
    }


#define vload_partial_1_0 NO_LOAD
#define vload_partial_1_1 vload1
#define vload_partial_1_2 NO_LOAD
#define vload_partial_1_3 NO_LOAD
#define vload_partial_1_4 NO_LOAD
#define vload_partial_1_5 NO_LOAD
#define vload_partial_1_6 NO_LOAD
#define vload_partial_1_7 NO_LOAD
#define vload_partial_1_8 NO_LOAD
#define vload_partial_1_9 NO_LOAD
#define vload_partial_1_10 NO_LOAD
#define vload_partial_1_11 NO_LOAD
#define vload_partial_1_12 NO_LOAD
#define vload_partial_1_13 NO_LOAD
#define vload_partial_1_14 NO_LOAD
#define vload_partial_1_15 NO_LOAD
#define vload_partial_1_16 NO_LOAD

#define vload_partial_2_0 NO_LOAD
#define vload_partial_2_1 vload_partial_1
#define vload_partial_2_2 vload_partial_2
#define vload_partial_2_3 NO_LOAD
#define vload_partial_2_4 NO_LOAD
#define vload_partial_2_5 NO_LOAD
#define vload_partial_2_6 NO_LOAD
#define vload_partial_2_7 NO_LOAD
#define vload_partial_2_8 NO_LOAD
#define vload_partial_2_9 NO_LOAD
#define vload_partial_2_10 NO_LOAD
#define vload_partial_2_11 NO_LOAD
#define vload_partial_2_12 NO_LOAD
#define vload_partial_2_13 NO_LOAD
#define vload_partial_2_14 NO_LOAD
#define vload_partial_2_15 NO_LOAD
#define vload_partial_2_16 NO_LOAD

#define vload_partial_3_0 NO_LOAD
#define vload_partial_3_1 vload_partial_1
#define vload_partial_3_2 vload_partial_2
#define vload_partial_3_3 vload_partial_3
#define vload_partial_3_4 NO_LOAD
#define vload_partial_3_5 NO_LOAD
#define vload_partial_3_6 NO_LOAD
#define vload_partial_3_7 NO_LOAD
#define vload_partial_3_8 NO_LOAD
#define vload_partial_3_9 NO_LOAD
#define vload_partial_3_10 NO_LOAD
#define vload_partial_3_11 NO_LOAD
#define vload_partial_3_12 NO_LOAD
#define vload_partial_3_13 NO_LOAD
#define vload_partial_3_14 NO_LOAD
#define vload_partial_3_15 NO_LOAD
#define vload_partial_3_16 NO_LOAD

#define vload_partial_4_0 NO_LOAD
#define vload_partial_4_1 vload_partial_1
#define vload_partial_4_2 vload_partial_2
#define vload_partial_4_3 vload_partial_3
#define vload_partial_4_4 vload_partial_4
#define vload_partial_4_5 NO_LOAD
#define vload_partial_4_6 NO_LOAD
#define vload_partial_4_7 NO_LOAD
#define vload_partial_4_8 NO_LOAD
#define vload_partial_4_9 NO_LOAD
#define vload_partial_4_10 NO_LOAD
#define vload_partial_4_11 NO_LOAD
#define vload_partial_4_12 NO_LOAD
#define vload_partial_4_13 NO_LOAD
#define vload_partial_4_14 NO_LOAD
#define vload_partial_4_15 NO_LOAD
#define vload_partial_4_16 NO_LOAD

#define vload_partial_8_0 NO_LOAD
#define vload_partial_8_1 vload_partial_1
#define vload_partial_8_2 vload_partial_2
#define vload_partial_8_3 vload_partial_3
#define vload_partial_8_4 vload_partial_4
#define vload_partial_8_5 vload_partial_5
#define vload_partial_8_6 vload_partial_6
#define vload_partial_8_7 vload_partial_7
#define vload_partial_8_8 vload_partial_8
#define vload_partial_8_9 NO_LOAD
#define vload_partial_8_10 NO_LOAD
#define vload_partial_8_11 NO_LOAD
#define vload_partial_8_12 NO_LOAD
#define vload_partial_8_13 NO_LOAD
#define vload_partial_8_14 NO_LOAD
#define vload_partial_8_15 NO_LOAD
#define vload_partial_8_16 NO_LOAD

#define vload_partial_16_0 NO_LOAD
#define vload_partial_16_1 vload_partial_1
#define vload_partial_16_2 vload_partial_2
#define vload_partial_16_3 vload_partial_3
#define vload_partial_16_4 vload_partial_4
#define vload_partial_16_5 vload_partial_5
#define vload_partial_16_6 vload_partial_6
#define vload_partial_16_7 vload_partial_7
#define vload_partial_16_8 vload_partial_8
#define vload_partial_16_9 vload_partial_9
#define vload_partial_16_10 vload_partial_10
#define vload_partial_16_11 vload_partial_11
#define vload_partial_16_12 vload_partial_12
#define vload_partial_16_13 vload_partial_13
#define vload_partial_16_14 vload_partial_14
#define vload_partial_16_15 vload_partial_15
#define vload_partial_16_16 vload_partial_16


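// vload_partial_n implementations: sizes that are not native OpenCL vector widths are composed from
// 8-, 4-, 3-, 2- and 1-element loads.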
#define vload_partial_1(DATA, OFFSET, PTR) \
    DATA.s0 = vload1(OFFSET, PTR);

#define vload_partial_2(DATA, OFFSET, PTR) \
    DATA.s01 = vload2(OFFSET, PTR);

#define vload_partial_3(DATA, OFFSET, PTR) \
    DATA.s012 = vload3(OFFSET, PTR);

#define vload_partial_4(DATA, OFFSET, PTR) \
    DATA.s0123 = vload4(OFFSET, PTR);

#define vload_partial_5(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    DATA.s4 = vload1(OFFSET, PTR + 4);

#define vload_partial_6(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vload_partial_7(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vload_partial_8(DATA, OFFSET, PTR) \
    DATA.s01234567 = vload8(OFFSET, PTR);

#define vload_partial_9(DATA, OFFSET, PTR)        \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    DATA.s8 = vload1(OFFSET, PTR + 8);

#define vload_partial_10(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vload_partial_11(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_3(DATA.s89A, OFFSET, PTR + 8);

#define vload_partial_12(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);

#define vload_partial_13(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_14(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_15(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_16(DATA, OFFSET, PTR) \
    DATA = vload16(OFFSET, PTR);



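// PIXEL_UNITn: number of 4-component (RGBA) image pixels spanned by an n-element vector.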
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4


#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)

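// read_image2d_* / write_image2d_*: read or write 1, 2 or 4 consecutive RGBA pixels as a float or half vector.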
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif

#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values));
#define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values));
#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
#endif


#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)


#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values)
#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values)

#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)

#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double

#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA

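// VSTORE_PARTIAL(size, store_size) mirrors VLOAD_PARTIAL: unsupported combinations map to NO_STORE, an empty block.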
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)

#define NO_STORE(data, offs, ptr) \
    {                             \
    }


#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE

#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE

#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE

#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE

#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE

#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16


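// vstore_partial_n implementations: non-native sizes are composed from 8-, 4-, 3-, 2- and 1-element stores.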
#define vstore_partial_1(DATA, OFFSET, PTR) \
    vstore1(DATA.s0, OFFSET, PTR);

#define vstore_partial_2(DATA, OFFSET, PTR) \
    vstore2(DATA.s01, OFFSET, PTR);

#define vstore_partial_3(DATA, OFFSET, PTR) \
    vstore3(DATA.s012, OFFSET, PTR);

#define vstore_partial_4(DATA, OFFSET, PTR) \
    vstore4(DATA.s0123, OFFSET, PTR);

#define vstore_partial_5(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore1(DATA.s4, OFFSET, PTR + 4);

#define vstore_partial_6(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vstore_partial_7(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vstore_partial_8(DATA, OFFSET, PTR) \
    vstore8(DATA.s01234567, OFFSET, PTR);

#define vstore_partial_9(DATA, OFFSET, PTR)        \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore1(DATA.s8, OFFSET, PTR + 8);

#define vstore_partial_10(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vstore_partial_11(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);

#define vstore_partial_12(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);

#define vstore_partial_13(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_14(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_15(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_16(DATA, OFFSET, PTR) \
    vstore16(DATA, OFFSET, PTR);




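// Saturating-convert aliases: OpenCL defines _sat conversions only for integer destinations, so the
// floating-point variants alias the plain converts; the *1 variants alias the scalar built-ins.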
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16

#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double

#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_uchar2_sat convert_uchar2_sat
#define convert_uchar3_sat convert_uchar3_sat
#define convert_uchar4_sat convert_uchar4_sat
#define convert_uchar8_sat convert_uchar8_sat
#define convert_uchar16_sat convert_uchar16_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat

#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)

#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)

#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)

#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)

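// SELECT_VEC_DATA_TYPE / SIGNED_INT_VEC_DATA_TYPE: (signed) integer types whose element size matches
// the input type, as required for select() masks and for signed intermediate arithmetic.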
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size

#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)

#define signed_int_vec_dt_uchar(size) char##size
#define signed_int_vec_dt_char(size) char##size
#define signed_int_vec_dt_ushort(size) short##size
#define signed_int_vec_dt_short(size) short##size
#define signed_int_vec_dt_half(size) short##size
#define signed_int_vec_dt_uint(size) int##size
#define signed_int_vec_dt_int(size) int##size
#define signed_int_vec_dt_float(size) int##size
#define signed_int_vec_dt_ulong(size) long##size
#define signed_int_vec_dt_long(size) long##size

#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)

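// Tree reductions of a vector's components: sum, product and maximum.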
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)

#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)

#define prod_reduce_1(x) (x)
#define prod_reduce_2(x) ((x).s0) * ((x).s1)
#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)

#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)

#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))

#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)

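// Kernel-argument lists for 1D..5D tensors: buffer pointer, per-dimension stride and step
// (bytes advanced per work-item), and the byte offset of the first element.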
#define VECTOR_DECLARATION(name)     \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_offset_first_element_in_bytes

#define IMAGE_DECLARATION(name)      \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_offset_first_element_in_bytes

#define TENSOR3D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_stride_z, \
    uint        name##_step_z,   \
    uint        name##_offset_first_element_in_bytes

#define TENSOR4D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_stride_z, \
    uint        name##_step_z,   \
    uint        name##_stride_w, \
    uint        name##_step_w,   \
    uint        name##_offset_first_element_in_bytes

#define TENSOR5D_DECLARATION(name)   \
    __global uchar *name##_ptr,      \
    uint        name##_stride_x, \
    uint        name##_step_x,   \
    uint        name##_stride_y, \
    uint        name##_step_y,   \
    uint        name##_stride_z, \
    uint        name##_step_z,   \
    uint        name##_stride_w, \
    uint        name##_step_w,   \
    uint        name##_stride_v, \
    uint        name##_step_v,   \
    uint        name##_offset_first_element_in_bytes

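// CONVERT_TO_*_STRUCT: build the Vector/Image/Tensor3D/Tensor4D descriptor for the current work-item;
// the *_NO_STEP variants pass a step of 0 so the pointer is not offset by the work-item id.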
#define CONVERT_TO_VECTOR_STRUCT(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)

#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)

#define CONVERT_TO_IMAGE_STRUCT(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)

#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)

#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)

#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT(name)                                                                                                           \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)

#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size)                                                                                                 \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)

#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name)                                                                                       \
    tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                           name##_stride_z, name##_step_z)


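// Lightweight descriptors for 1D..4D buffers in global memory (strides in bytes).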
typedef struct Vector
{
    __global uchar *ptr;
    int             offset_first_element_in_bytes;
    int             stride_x;
} Vector;


typedef struct Image
{
    __global uchar *ptr;
    int             offset_first_element_in_bytes;
    int             stride_x;
    int             stride_y;
} Image;


typedef struct Tensor3D
{
    __global uchar *ptr;
    int             offset_first_element_in_bytes;
    int             stride_x;
    int             stride_y;
    int             stride_z;
} Tensor3D;


typedef struct Tensor4D
{
    __global uchar *ptr;
    int             offset_first_element_in_bytes;
    int             stride_x;
    int             stride_y;
    int             stride_z;
    int             stride_w;
} Tensor4D;

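// update_*_workitem_ptr: build a descriptor and advance its pointer to the element addressed by the
// current work-item (get_global_id(n) * step per dimension).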
1120inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
1121{
1122    Vector vector =
1123    {
1124        .ptr                           = ptr,
1125        .offset_first_element_in_bytes = offset_first_element_in_bytes,
1126        .stride_x                      = stride_x,
1127    };
1128    vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
1129    return vector;
1130}
1131
1132
1133inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
1134{
1135    Image img =
1136    {
1137        .ptr                           = ptr,
1138        .offset_first_element_in_bytes = offset_first_element_in_bytes,
1139        .stride_x                      = stride_x,
1140        .stride_y                      = stride_y
1141    };
1142    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
1143    return img;
1144}
1145
1146
1147inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1148{
1149    Image img =
1150    {
1151        .ptr                           = ptr,
1152        .offset_first_element_in_bytes = offset_first_element_in_bytes,
1153        .stride_x                      = stride_x,
1154        .stride_y                      = stride_y
1155    };
1156    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
1157    return img;
1158}
1159
1160
1161inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1162{
1163    Tensor3D tensor =
1164    {
1165        .ptr                           = ptr,
1166        .offset_first_element_in_bytes = offset_first_element_in_bytes,
1167        .stride_x                      = stride_x,
1168        .stride_y                      = stride_y,
1169        .stride_z                      = stride_z
1170    };
1171    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
1172    return tensor;
1173}
1174
1175
1176inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
1177{
1178    Tensor3D tensor =
1179    {
1180        .ptr                           = ptr,
1181        .offset_first_element_in_bytes = offset_first_element_in_bytes,
1182        .stride_x                      = stride_x,
1183        .stride_y                      = stride_y,
1184        .stride_z                      = stride_z
1185    };
1186    return tensor;
1187}
1188
1189inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
1190                                             uint step_w,
1191                                             uint mod_size)
1192{
1193    Tensor4D tensor =
1194    {
1195        .ptr                           = ptr,
1196        .offset_first_element_in_bytes = offset_first_element_in_bytes,
1197        .stride_x                      = stride_x,
1198        .stride_y                      = stride_y,
1199        .stride_z                      = stride_z,
1200        .stride_w                      = stride_w
1201    };
1202
1203    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
1204    return tensor;
1205}
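
/* Note: update_tensor4D_workitem_ptr folds the z and w dimensions into the
 * single get_global_id(2) axis. With mod_size equal to the tensor depth, a
 * work-item with gz = get_global_id(2) addresses z = gz % mod_size and
 * w = gz / mod_size. Illustrative values: for mod_size = 4 and gz = 10 the
 * pointer advances by 2 * step_z + 2 * step_w on top of the x/y offsets.
 */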


inline __global const uchar *vector_offset(const Vector *vec, int x)
{
    return vec->ptr + x * vec->stride_x;
}


inline __global uchar *offset(const Image *img, int x, int y)
{
    return img->ptr + x * img->stride_x + y * img->stride_y;
}


inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}


inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}


inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
    uint num_elements = width * height;

    const uint z = index / num_elements;

    index %= num_elements;

    const uint y = index / width;

    index %= width;

    const uint x = index;

    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
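
/* Worked example for tensor3D_index2ptr (illustrative sizes): for width = 4,
 * height = 3 and linear index = 17, num_elements = 12, so z = 17 / 12 = 1;
 * the remainder 5 gives y = 5 / 4 = 1 and x = 5 % 4 = 1, and the returned
 * pointer is ptr + offset_first_element_in_bytes + stride_x + stride_y + stride_z.
 */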

#endif


#define CONVERT_DOWN_RTE_STR(x, type) (convert_##type##_rte((x)))
#define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type)


inline uchar quantize_qasymm8(float input, float offset, float scale)
{
    float out_f32 = input / scale + offset;
    uchar res_u8  = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, int), uchar);
    return res_u8;
}


inline float dequantize_qasymm8(uchar input, float offset, float scale)
{
    return ((float)input - offset) * scale;
}


inline float dequantize_qasymm8_signed(char input, float offset, float scale)
{
    return ((float)input - offset) * scale;
}
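
/* Round-trip example (scale and offset values are illustrative only):
 * quantize_qasymm8(2.3f, 10.f, 0.5f) computes 2.3 / 0.5 + 10 = 14.6, rounds
 * to nearest even to 15 and saturates to uchar, while
 * dequantize_qasymm8(15, 10.f, 0.5f) returns (15 - 10) * 0.5 = 2.5f, i.e.
 * the original value is recovered to within one quantization step (0.5).
 */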


#define QUANTIZE_IMPL(type, size)                                                                                       \
    inline VEC_DATA_TYPE(type, size) quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \
    {                                                                                                                   \
        VEC_DATA_TYPE(float, size)                                                                                      \
        out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset);                   \
        VEC_DATA_TYPE(type, size)                                                                                       \
        res = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), VEC_DATA_TYPE(type, size));              \
        return res;                                                                                                     \
    }


#define DEQUANTIZE_IMPL(type, size)                                                                                       \
    inline VEC_DATA_TYPE(float, size) dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \
    {                                                                                                                     \
        return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale;                                             \
    }


#define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size)                                                                                        \
    inline VEC_DATA_TYPE(int, size) asymm_rounding_divide_by_POW2_##size(VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \
    {                                                                                                                                   \
        const VEC_DATA_TYPE(int, size)                                                                                                  \
        zero = (VEC_DATA_TYPE(int, size))0;                                                                                             \
        const VEC_DATA_TYPE(int, size)                                                                                                  \
        one = (VEC_DATA_TYPE(int, size))1;                                                                                              \
        VEC_DATA_TYPE(int, size)                                                                                                        \
        mask = (one << exponent) - one;                                                                                                 \
        VEC_DATA_TYPE(int, size)                                                                                                        \
        threshold = (mask >> 1) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))(x < 0));                                          \
        return (x >> exponent) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))((x & mask) > threshold));                          \
    }
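
/* asymm_rounding_divide_by_POW2_* divides by 2^exponent rounding half away
 * from zero. Examples (scalar variant): x = 5, exponent = 1 gives mask = 1,
 * threshold = 0, so (5 >> 1) + 1 = 3; x = -5 gives threshold = 1, so
 * (-5 >> 1) + 0 = -3, matching round(5 / 2) = 3 and round(-5 / 2) = -3.
 */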


#define ASYMM_MULT_IMPL(size)                                                                                \
    inline VEC_DATA_TYPE(int, size) asymm_mult##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \
    {                                                                                                        \
        VEC_DATA_TYPE(int, size)                                                                             \
        overflow = a == b && a == INT_MIN;                                                                   \
        VEC_DATA_TYPE(long, size)                                                                            \
        a_64 = convert_long##size(a);                                                                        \
        VEC_DATA_TYPE(long, size)                                                                            \
        b_64 = convert_long##size(b);                                                                        \
        VEC_DATA_TYPE(long, size)                                                                            \
        ab_64 = a_64 * b_64;                                                                                 \
                                                                                                             \
        VEC_DATA_TYPE(long, size)                                                                            \
        mask1 = 1 << 30;                                                                                     \
        VEC_DATA_TYPE(long, size)                                                                            \
        mask2 = 1 - (1 << 30);                                                                               \
        VEC_DATA_TYPE(long, size)                                                                            \
        is_positive_or_zero = ab_64 >= 0;                                                                    \
        VEC_DATA_TYPE(long, size)                                                                            \
        nudge = select(mask2, mask1, (SELECT_VEC_DATA_TYPE(long, size))(is_positive_or_zero));               \
        VEC_DATA_TYPE(long, size)                                                                            \
        mask = 1ll << 31;                                                                                    \
        VEC_DATA_TYPE(int, size)                                                                             \
        ab_x2_high32 = convert_int##size((ab_64 + nudge) / mask);                                            \
        return select(ab_x2_high32, INT_MAX, (SELECT_VEC_DATA_TYPE(int, size))(overflow));                   \
    }
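
/* asymm_mult* is the gemmlowp saturating rounding doubling high multiply:
 * it returns the high 32 bits of 2 * a * b with rounding, treating a and b
 * as Q0.31 fixed-point values. Example: a = b = 1 << 30 (0.5 in Q0.31)
 * yields ((1 << 60) + (1 << 30)) / (1 << 31) = 1 << 29, i.e. 0.25 in Q0.31;
 * the single overflow case a = b = INT_MIN saturates to INT_MAX.
 */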


#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(size)                                                    \
    inline VEC_DATA_TYPE(int, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(VEC_DATA_TYPE(int, size) a) \
    {                                                                                                                               \
        const VEC_DATA_TYPE(int, size) constant_term     = 1895147668;                                                              \
        const VEC_DATA_TYPE(int, size) constant_1_over_3 = 715827883;                                                               \
        const int k_fractional_bits = 31;                                                                                           \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x = a + (1 << (k_fractional_bits - 3));                                                                                     \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x2 = ASYMM_MULT(x, x, size);                                                                                                \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x3 = ASYMM_MULT(x2, x, size);                                                                                               \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x4 = ASYMM_MULT(x2, x2, size);                                                                                              \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x4_over_4 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4, 2, size);                                                                     \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x4_over_24_plus_x3_over_6_plus_x2 = ASYMM_MULT((x4_over_4 + x3), constant_1_over_3, size) + x2;                             \
        VEC_DATA_TYPE(int, size)                                                                                                    \
        x4_over_24_plus_x3_over_6_plus_x2_over_2 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4_over_24_plus_x3_over_6_plus_x2, 1, size);       \
        return constant_term + ASYMM_MULT(constant_term, x + x4_over_24_plus_x3_over_6_plus_x2_over_2, size);                       \
    }


#define ASYMM_SELECT_USING_MASK_IMPL(size)                                                                                                                                \
    inline VEC_DATA_TYPE(int, size) asymm_select_using_mask##size(VEC_DATA_TYPE(int, size) if_mask, VEC_DATA_TYPE(int, size) then_val, VEC_DATA_TYPE(int, size) else_val) \
    {                                                                                                                                                                     \
        return (if_mask & then_val) ^ (~if_mask & else_val);                                                                                                              \
    }
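
/* asymm_select_using_mask assumes if_mask is all-ones or all-zeros per lane,
 * as produced by the mask helpers below: with if_mask = ~0 the expression
 * reduces to then_val ^ 0 = then_val, and with if_mask = 0 it reduces to
 * 0 ^ else_val = else_val.
 */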


#define ASYMM_MASK_IF_ZERO_IMPL(size)                                                    \
    inline VEC_DATA_TYPE(int, size) asymm_mask_if_zero##size(VEC_DATA_TYPE(int, size) a) \
    {                                                                                    \
        const VEC_DATA_TYPE(int, size) all_zeros = 0;                                    \
        const VEC_DATA_TYPE(int, size) all_ones  = ~0;                                   \
        return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a == 0));   \
    }


#define ASYMM_MASK_IF_NON_ZERO_IMPL(size)                                                    \
    inline VEC_DATA_TYPE(int, size) asymm_mask_if_non_zero##size(VEC_DATA_TYPE(int, size) a) \
    {                                                                                        \
        const VEC_DATA_TYPE(int, size) all_zeros = 0;                                        \
        const VEC_DATA_TYPE(int, size) all_ones  = ~0;                                       \
        return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a != 0));       \
    }

#define EXP_BARREL_SHIFTER_IMPL(size)                                                                                                                                                                         \
    inline VEC_DATA_TYPE(int, size) exp_barrel_shifter##size(VEC_DATA_TYPE(int, size) result, int exponent, int fp_multiplier, int k_integer_bits, int k_fractional_bits, VEC_DATA_TYPE(int, size) remainder) \
    {                                                                                                                                                                                                         \
        if(k_integer_bits > exponent)                                                                                                                                                                         \
        {                                                                                                                                                                                                     \
            const int k_shift_amount = k_integer_bits > exponent ? k_fractional_bits + exponent : 0;                                                                                                          \
            return ASYMM_SELECT_USING_MASK(                                                                                                                                                                   \
                ASYMM_MASK_IF_NON_ZERO(remainder & (1 << k_shift_amount), size),                                                                                                                              \
                ASYMM_MULT(result, fp_multiplier, size), result, size);                                                                                                                                       \
        }                                                                                                                                                                                                     \
                                                                                                                                                                                                              \
        return result;                                                                                                                                                                                        \
    }


#define ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(size)                                                                               \
    inline VEC_DATA_TYPE(int, size) asymm_exp_on_negative_values##size(VEC_DATA_TYPE(int, size) a, int k_integer_bits)        \
    {                                                                                                                         \
        const int k_fractional_bits = 31 - k_integer_bits;                                                                    \
        VEC_DATA_TYPE(int, size)                                                                                              \
        k_one_quarter = 1 << (k_fractional_bits - 2);                                                                         \
        VEC_DATA_TYPE(int, size)                                                                                              \
        mask = k_one_quarter - 1;                                                                                             \
        VEC_DATA_TYPE(int, size)                                                                                              \
        a_mod_quarter_minus_one_quarter = (a & mask) - k_one_quarter;                                                         \
        VEC_DATA_TYPE(int, size)                                                                                              \
        a_mod_quarter_minus_one_quarter_scaled = a_mod_quarter_minus_one_quarter << k_integer_bits;                           \
        VEC_DATA_TYPE(int, size)                                                                                              \
        result = ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a_mod_quarter_minus_one_quarter_scaled, size); \
        VEC_DATA_TYPE(int, size)                                                                                              \
        remainder = a_mod_quarter_minus_one_quarter - a;                                                                      \
                                                                                                                              \
        result = EXP_BARREL_SHIFTER(result, -2, 1672461947, k_integer_bits, k_fractional_bits, remainder, size);              \
        result = EXP_BARREL_SHIFTER(result, -1, 1302514674, k_integer_bits, k_fractional_bits, remainder, size);              \
        result = EXP_BARREL_SHIFTER(result, +0, 790015084, k_integer_bits, k_fractional_bits, remainder, size);               \
        result = EXP_BARREL_SHIFTER(result, +1, 290630308, k_integer_bits, k_fractional_bits, remainder, size);               \
        result = EXP_BARREL_SHIFTER(result, +2, 39332535, k_integer_bits, k_fractional_bits, remainder, size);                \
        result = EXP_BARREL_SHIFTER(result, +3, 720401, k_integer_bits, k_fractional_bits, remainder, size);                  \
        result = EXP_BARREL_SHIFTER(result, +4, 242, k_integer_bits, k_fractional_bits, remainder, size);                     \
                                                                                                                              \
        if(k_integer_bits > 5)                                                                                                \
        {                                                                                                                     \
            const VEC_DATA_TYPE(int, size) clamp = -(1 << (k_fractional_bits + 5));                                           \
            result = ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(a < clamp, size), 0, result, size);                       \
        }                                                                                                                     \
                                                                                                                              \
        const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX;                                                                      \
        return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_ZERO(a, size), Q0_one, result, size);                                    \
    }
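
/* The barrel-shifter constants are exp(-2^e) in Q0.31 fixed point, e.g.
 * 1672461947 ~ exp(-0.25) * 2^31 for e = -2 and 790015084 ~ exp(-1) * 2^31
 * for e = 0: each call multiplies the partial result by exp(-2^e) whenever
 * the corresponding bit of the remainder is set, extending the polynomial
 * approximation on (-1/4, 0] to all negative inputs.
 */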


#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(size)                                                                  \
    inline VEC_DATA_TYPE(int, size) asymm_saturating_rounding_mult_by_pow2##size(VEC_DATA_TYPE(int, size) x, int exponent) \
    {                                                                                                                      \
        if(exponent < 0)                                                                                                   \
        {                                                                                                                  \
            return ASYMM_ROUNDING_DIVIDE_BY_POW2(x, -exponent, size);                                                      \
        }                                                                                                                  \
                                                                                                                           \
        const VEC_DATA_TYPE(int, size) min = INT_MIN;                                                                      \
        const VEC_DATA_TYPE(int, size) max = INT_MAX;                                                                      \
        int threshold = ((1 << (31 - exponent)) - 1);                                                                      \
        VEC_DATA_TYPE(int, size)                                                                                           \
        positive_mask = ASYMM_MASK_IF_NON_ZERO(x > threshold, size);                                                       \
        VEC_DATA_TYPE(int, size)                                                                                           \
        negative_mask = ASYMM_MASK_IF_NON_ZERO(x < -threshold, size);                                                      \
        VEC_DATA_TYPE(int, size)                                                                                           \
        result = x << exponent;                                                                                            \
        result = ASYMM_SELECT_USING_MASK(positive_mask, max, result, size);                                                \
        result = ASYMM_SELECT_USING_MASK(negative_mask, min, result, size);                                                \
        return result;                                                                                                     \
    }


#define ASYMM_ROUNDING_HALF_SUM_IMPL(size)                                                                                \
    inline VEC_DATA_TYPE(int, size) asymm_rounding_half_sum##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \
    {                                                                                                                     \
        VEC_DATA_TYPE(long, size)                                                                                         \
        a64 = convert_long##size(a);                                                                                      \
        VEC_DATA_TYPE(long, size)                                                                                         \
        b64 = convert_long##size(b);                                                                                      \
        VEC_DATA_TYPE(long, size)                                                                                         \
        sum = a64 + b64;                                                                                                  \
        const VEC_DATA_TYPE(long, size) one       = 1;                                                                    \
        const VEC_DATA_TYPE(long, size) minus_one = -1;                                                                   \
        VEC_DATA_TYPE(long, size)                                                                                         \
        sign = select(minus_one, one, (SELECT_VEC_DATA_TYPE(long, size))(sum >= 0));                                      \
        return convert_int##size((sum + sign) / 2);                                                                       \
    }
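
/* Example: asymm_rounding_half_sum1(3, 4) = (3 + 4 + 1) / 2 = 4 and
 * asymm_rounding_half_sum1(-3, -4) = (-7 - 1) / 2 = -4; the sum is formed
 * in 64 bits so operands near INT_MAX cannot overflow, and the added sign
 * rounds the half sum away from zero.
 */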


#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(size)                                                    \
    inline VEC_DATA_TYPE(int, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(VEC_DATA_TYPE(int, size) a) \
    {                                                                                                        \
        const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX;                                                     \
        const VEC_DATA_TYPE(int, size) Q2_one = 1 << (31 - 2);                                               \
        VEC_DATA_TYPE(int, size)                                                                             \
        half_denominator = ASYMM_ROUNDING_HALF_SUM(a, Q0_one, size);                                         \
        const VEC_DATA_TYPE(int, size) Q2_48_over_17     = 1515870810;                                       \
        const VEC_DATA_TYPE(int, size) Q2_neg_32_over_17 = -1010580540;                                      \
        VEC_DATA_TYPE(int, size)                                                                             \
        x = Q2_48_over_17 + ASYMM_MULT(half_denominator, Q2_neg_32_over_17, size);                           \
        for(int i = 0; i < 3; i++)                                                                           \
        {                                                                                                    \
            VEC_DATA_TYPE(int, size)                                                                         \
            half_denominator_times_x = ASYMM_MULT(half_denominator, x, size);                                \
            VEC_DATA_TYPE(int, size)                                                                         \
            one_minus_half_denominator_times_x = Q2_one - half_denominator_times_x;                          \
            VEC_DATA_TYPE(int, size)                                                                         \
            tmp = ASYMM_MULT(x, one_minus_half_denominator_times_x, size);                                   \
            x   = x + ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(tmp, 2, size);                                  \
        }                                                                                                    \
        return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, 1, size);                                           \
    }
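
/* This follows the gemmlowp reciprocal scheme: x starts from the affine
 * estimate 48/17 - 32/17 * half_denominator (constants in Q2.29) and is
 * refined by three Newton-Raphson steps x <- x + x * (1 - half_denominator * x),
 * each of which roughly doubles the number of correct bits; the final shift
 * by one converts the Q2.29 iterate into the Q0.31 result 1 / (1 + a).
 */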


#define ASYMM_RESCALE_IMPL(size)                                                                                                    \
    inline VEC_DATA_TYPE(int, size) asymm_rescale##size(VEC_DATA_TYPE(int, size) value, int src_integer_bits, int dst_integer_bits) \
    {                                                                                                                               \
        int exponent = src_integer_bits - dst_integer_bits;                                                                         \
        return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size);                                                       \
    }

#define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale)
#define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size)
#define DEQUANTIZE_STR(input, offset, scale, type, size) dequantize_##type##size(input, offset, scale)
#define DEQUANTIZE(input, offset, scale, type, size) DEQUANTIZE_STR(input, offset, scale, type, size)

#define ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size) asymm_rounding_divide_by_POW2_##size(x, exponent)
#define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size)
#define ASYMM_MULT_STR(a, b, size) asymm_mult##size(a, b)
#define ASYMM_MULT(a, b, size) ASYMM_MULT_STR(a, b, size)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size) \
    ASYMM_MULT(x *((VEC_DATA_TYPE(int, size))(1) << (-left_shift)), quantized_multiplier, size)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size) \
    ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(x, quantized_multiplier, size), right_shift, size)
#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(a)
#define ASYMM_SELECT_USING_MASK(if_mask, then_val, else_val, size) asymm_select_using_mask##size(if_mask, then_val, else_val)
#define ASYMM_MASK_IF_ZERO(a, size) asymm_mask_if_zero##size(a)
#define ASYMM_MASK_IF_NON_ZERO(a, size) asymm_mask_if_non_zero##size(a)
#define EXP_BARREL_SHIFTER(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder, size) exp_barrel_shifter##size(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder)
#define ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size) asymm_exp_on_negative_values##size(a, k_integer_bits)
#define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size) ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size)
#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(a)
#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size)
#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, exponent, size) asymm_saturating_rounding_mult_by_pow2##size(x, exponent)
#define ASYMM_ROUNDING_HALF_SUM(a, b, size) asymm_rounding_half_sum##size(a, b)
#define ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) asymm_rescale##size(value, src_integer_bits, dst_integer_bits)
#define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size)
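
/* The *_STR / non-STR pairs are the standard two-level macro expansion
 * idiom: the outer macro expands its arguments before they reach the token
 * paste, so e.g. QUANTIZE(v, off, sc, DATA_TYPE, VEC_SIZE) resolves
 * DATA_TYPE and VEC_SIZE first and produces quantize_uchar16 rather than
 * the literal quantize_DATA_TYPEVEC_SIZE.
 */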

#define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size)                                                                             \
    inline VEC_DATA_TYPE(int, size) multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \
    {                                                                                                                           \
        const int left_shift  = shift > 0 ? shift : 0;                                                                          \
        const int right_shift = shift > 0 ? 0 : -shift;                                                                         \
        return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), right_shift, size);             \
    }
#define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) multiply_by_quantized_multiplier##size(input, qmul, shift)
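
/* Sign convention for the shift parameter: a positive shift is applied as a
 * left shift before the fixed-point multiply, a negative shift as a rounding
 * right shift after it. For instance shift = -3 reduces to
 * ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input, qmul, size), 3, size).
 */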

QUANTIZE_IMPL(uchar, 1)
QUANTIZE_IMPL(char, 1)
QUANTIZE_IMPL(uint, 1)
QUANTIZE_IMPL(int, 1)
QUANTIZE_IMPL(uchar, 2)
QUANTIZE_IMPL(char, 2)
QUANTIZE_IMPL(uint, 2)
QUANTIZE_IMPL(int, 2)
QUANTIZE_IMPL(uchar, 3)
QUANTIZE_IMPL(char, 3)
QUANTIZE_IMPL(uint, 3)
QUANTIZE_IMPL(int, 3)
QUANTIZE_IMPL(uchar, 4)
QUANTIZE_IMPL(ushort, 4)
QUANTIZE_IMPL(short, 4)
QUANTIZE_IMPL(int, 4)
QUANTIZE_IMPL(uchar, 8)
QUANTIZE_IMPL(char, 8)
QUANTIZE_IMPL(uint, 8)
QUANTIZE_IMPL(int, 8)
QUANTIZE_IMPL(uchar, 16)
QUANTIZE_IMPL(char, 16)
QUANTIZE_IMPL(ushort, 16)
QUANTIZE_IMPL(short, 16)
QUANTIZE_IMPL(uint, 16)
QUANTIZE_IMPL(int, 16)

DEQUANTIZE_IMPL(uchar, 1)
DEQUANTIZE_IMPL(char, 1)
DEQUANTIZE_IMPL(uint, 1)
DEQUANTIZE_IMPL(int, 1)
DEQUANTIZE_IMPL(uchar, 2)
DEQUANTIZE_IMPL(char, 2)
DEQUANTIZE_IMPL(uint, 2)
DEQUANTIZE_IMPL(int, 2)
DEQUANTIZE_IMPL(uchar, 3)
DEQUANTIZE_IMPL(char, 3)
DEQUANTIZE_IMPL(uint, 3)
DEQUANTIZE_IMPL(int, 3)
DEQUANTIZE_IMPL(uchar, 4)
DEQUANTIZE_IMPL(ushort, 4)
DEQUANTIZE_IMPL(short, 4)
DEQUANTIZE_IMPL(int, 4)
DEQUANTIZE_IMPL(uchar, 8)
DEQUANTIZE_IMPL(char, 8)
DEQUANTIZE_IMPL(uint, 8)
DEQUANTIZE_IMPL(int, 8)
DEQUANTIZE_IMPL(uchar, 16)
DEQUANTIZE_IMPL(char, 16)
DEQUANTIZE_IMPL(ushort, 16)
DEQUANTIZE_IMPL(short, 16)
DEQUANTIZE_IMPL(uint, 16)
DEQUANTIZE_IMPL(int, 16)

ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(1)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(3)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16)

ASYMM_MULT_IMPL(1)
ASYMM_MULT_IMPL(2)
ASYMM_MULT_IMPL(3)
ASYMM_MULT_IMPL(4)
ASYMM_MULT_IMPL(8)
ASYMM_MULT_IMPL(16)

ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(1)
ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(2)
ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(3)
ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(4)
ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(8)
ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(16)

ASYMM_SELECT_USING_MASK_IMPL(1)
ASYMM_SELECT_USING_MASK_IMPL(2)
ASYMM_SELECT_USING_MASK_IMPL(3)
ASYMM_SELECT_USING_MASK_IMPL(4)
ASYMM_SELECT_USING_MASK_IMPL(8)
ASYMM_SELECT_USING_MASK_IMPL(16)

ASYMM_MASK_IF_ZERO_IMPL(1)
ASYMM_MASK_IF_ZERO_IMPL(2)
ASYMM_MASK_IF_ZERO_IMPL(3)
ASYMM_MASK_IF_ZERO_IMPL(4)
ASYMM_MASK_IF_ZERO_IMPL(8)
ASYMM_MASK_IF_ZERO_IMPL(16)

ASYMM_MASK_IF_NON_ZERO_IMPL(1)
ASYMM_MASK_IF_NON_ZERO_IMPL(2)
ASYMM_MASK_IF_NON_ZERO_IMPL(3)
ASYMM_MASK_IF_NON_ZERO_IMPL(4)
ASYMM_MASK_IF_NON_ZERO_IMPL(8)
ASYMM_MASK_IF_NON_ZERO_IMPL(16)

EXP_BARREL_SHIFTER_IMPL(1)
EXP_BARREL_SHIFTER_IMPL(2)
EXP_BARREL_SHIFTER_IMPL(3)
EXP_BARREL_SHIFTER_IMPL(4)
EXP_BARREL_SHIFTER_IMPL(8)
EXP_BARREL_SHIFTER_IMPL(16)

ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(1)
ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(2)
ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(3)
ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(4)
ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(8)
ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(16)

ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(1)
ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(2)
ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(3)
ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(4)
ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(8)
ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(16)

ASYMM_ROUNDING_HALF_SUM_IMPL(1)
ASYMM_ROUNDING_HALF_SUM_IMPL(2)
ASYMM_ROUNDING_HALF_SUM_IMPL(3)
ASYMM_ROUNDING_HALF_SUM_IMPL(4)
ASYMM_ROUNDING_HALF_SUM_IMPL(8)
ASYMM_ROUNDING_HALF_SUM_IMPL(16)

ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(1)
ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(2)
ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(3)
ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(4)
ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(8)
ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(16)

ASYMM_RESCALE_IMPL(1)
ASYMM_RESCALE_IMPL(2)
ASYMM_RESCALE_IMPL(3)
ASYMM_RESCALE_IMPL(4)
ASYMM_RESCALE_IMPL(8)
ASYMM_RESCALE_IMPL(16)

MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(1)
MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(2)
MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(3)
MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(4)
MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(8)
MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(16)

#endif

#if defined(DATA_TYPE) && defined(DATA_TYPE_DELTAS) && defined(WEIGHT_X) && defined(WEIGHT_Y) && defined(WEIGHT_W) && defined(WEIGHT_H) && defined(IMG_WIDTH) && defined(IMG_HEIGHT) && defined(BOX_FIELDS) && defined(SCALE_BEFORE) && defined(OFFSET_BOXES) && defined(SCALE_BOXES) && defined(OFFSET_DELTAS) && defined(SCALE_DELTAS) && defined(OFFSET_PRED_BOXES) && defined(SCALE_PRED_BOXES)

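/* All kernel parameters arrive as compile-time build options. An
 * illustrative (not authoritative) set of defines for a QASYMM8 build:
 *   -DDATA_TYPE=uchar -DDATA_TYPE_DELTAS=uchar
 *   -DWEIGHT_X=1.f -DWEIGHT_Y=1.f -DWEIGHT_W=1.f -DWEIGHT_H=1.f
 *   -DIMG_WIDTH=600 -DIMG_HEIGHT=600 -DBOX_FIELDS=4 -DSCALE_BEFORE=1.f
 *   -DOFFSET_BOXES=128 -DSCALE_BOXES=0.125f -DOFFSET_DELTAS=128
 *   -DSCALE_DELTAS=0.125f -DOFFSET_PRED_BOXES=128 -DSCALE_PRED_BOXES=0.125f
 *   -DBBOX_XFORM_CLIP=4.135166556742356f
 * Note that BBOX_XFORM_CLIP is used by the kernel body although it is not
 * listed in the #if guard above.
 */
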
__kernel void bounding_box_transform_quantized(
    VECTOR_DECLARATION(boxes),
    IMAGE_DECLARATION(pred_boxes),
    IMAGE_DECLARATION(deltas))
{
    // Get pixels pointer
    Vector boxes      = CONVERT_TO_VECTOR_STRUCT_NO_STEP(boxes);
    Image  pred_boxes = CONVERT_TO_IMAGE_STRUCT(pred_boxes);
    Image  deltas     = CONVERT_TO_IMAGE_STRUCT(deltas);

    const float one     = 1.f;
    const float halfone = 0.5f;

    // Load and dequantize the delta and box values for this work-item's box
    const int py           = get_global_id(1);
    float4    scale_before = (float4)SCALE_BEFORE;
    float4 delta           = DEQUANTIZE(vload4(0, (__global DATA_TYPE_DELTAS *)deltas.ptr), OFFSET_DELTAS, SCALE_DELTAS, DATA_TYPE_DELTAS, 4);
    float4 box             = DEQUANTIZE(vload4(0, (__global DATA_TYPE *)vector_offset(&boxes, BOX_FIELDS * py)), OFFSET_BOXES, SCALE_BOXES, DATA_TYPE, 4) / scale_before;

    // Calculate width, height and center of the old box; clip dw/dh to avoid exp overflow
    float2 dims    = box.s23 - box.s01 + one;
    float2 ctr     = box.s01 + halfone * dims;
    float4 weights = (float4)(WEIGHT_X, WEIGHT_Y, WEIGHT_W, WEIGHT_H);
    delta /= weights;
    delta.s23 = min(delta.s23, (float)BBOX_XFORM_CLIP);

    // Calculate center and dimensions of the new box (translation + scaling)
    float2 pred_ctr  = delta.s01 * dims + ctr;
    float2 pred_dims = exp(delta.s23) * dims;

    // Useful vector constants for clamping to the image bounds
    float4 max_values = (float4)(IMG_WIDTH - 1, IMG_HEIGHT - 1, IMG_WIDTH - 1, IMG_HEIGHT - 1);
    float4 sign       = (float4)(-1, -1, 1, 1);
    float4 min_values = 0;

    // Calculate the (x1, y1, x2, y2) coordinates of the new box
    float4 pred_box = pred_ctr.s0101 + sign * halfone * pred_dims.s0101;
#ifdef OFFSET // Possibly adjust the predicted boxes
    pred_box.s23 -= one;
#endif // OFFSET
    pred_box = CLAMP(pred_box, min_values, max_values);
#ifdef SCALE_AFTER // Possibly scale the predicted boxes
    pred_box *= (float4)SCALE_AFTER;
#endif // SCALE_AFTER

    // Quantize and store the predicted box
    vstore4(QUANTIZE(pred_box, OFFSET_PRED_BOXES, SCALE_PRED_BOXES, DATA_TYPE, 4), 0, (__global DATA_TYPE *)pred_boxes.ptr);
}
#endif  )"