xref: /aosp_15_r20/external/XNNPACK/src/qu8-gemm/gen/4x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qu8_gemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

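  // Round kc up to a multiple of 2: the "c2" weight layout packs K in pairs,
  // so the 16x2 dot products below always consume whole 2x-uint8 lanes.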
  kc = round_up_po2(kc, 2);
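  // Set up per-row A/C pointers. When mr < 4, trailing pointers are clamped
  // to the previous row, so the extra rows recompute the same data instead of
  // reading or writing out of bounds.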
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
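    // Each packed weight block begins with 4 int32 column biases; seed every
    // row's accumulator with them.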
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
    const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
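    // Main K loop: consume 8 uint8 activations (4 k-pairs) per row per
    // iteration. Each 128-bit weight load ("ld128") covers two k-pairs of 4
    // columns; weights are widened to int16 and the kernel zero point is
    // subtracted before accumulation. Each activation k-pair is broadcast to
    // all 32-bit lanes via a shuffle, so one dot16x2 instruction accumulates
    // that pair into all 4 output columns.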
    while (k >= 8 * sizeof(uint8_t)) {
      const v128_t vxa0 = wasm_u16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_u16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_u16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      const v128_t vxa3 = wasm_u16x8_load8x8((const v128_t*) a3);
      a3 += 8;

      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
      const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
      const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
      const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));

      w = (const void*) ((const uint8_t*) w + 32);
      k -= 8 * sizeof(uint8_t);
    }
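    // Remainder: kc was rounded up to a multiple of 2, so 2, 4, or 6 bytes
    // may remain. wasm_u16x8_load8x8 may read past the valid data; the kernel
    // is annotated XNN_OOB_READS to permit this, and the out-of-range lanes
    // are never selected by the shuffles below.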
    if (k != 0) {
      const v128_t vxa0 = wasm_u16x8_load8x8(a0);
      a0 = (const uint8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_u16x8_load8x8(a1);
      a1 = (const uint8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_u16x8_load8x8(a2);
      a2 = (const uint8_t*) ((uintptr_t) a2 + k);
      const v128_t vxa3 = wasm_u16x8_load8x8(a3);
      a3 = (const uint8_t*) ((uintptr_t) a3 + k);

      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
      w = (const void*) ((const uint8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));

      if (k > 2 * sizeof(uint8_t)) {
        const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
        w = (const void*) ((const uint8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));

        if (k > 4 * sizeof(uint8_t)) {
          const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
          w = (const void*) ((const uint8_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        }
      }
    }

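    // Requantization (fp32 variant): convert the int32 accumulators to float
    // and scale, then apply the "magic bias" trick: adding a large constant
    // places the rounded integer value in the low mantissa bits, so the
    // integer max (output-min clamp) and the subtraction of
    // (magic bias - output zero point) yield zero-point-adjusted int32
    // results without an explicit float-to-int conversion.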
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

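    // Pack the four int32x4 row results down to uint8: narrow pairs of rows
    // to int16x8 with signed saturation, then to uint8x16 with unsigned
    // saturation, and finish with the output-max clamp.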
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_u8x16_min(vout, voutput_max);

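    // Store: vout now holds the 4x4 tile, one byte per element with row r in
    // 32-bit lane r. The full-tile path writes each row's 4 bytes with a
    // single 32-bit store by moving the lane's bits through a float.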
    if (nc >= 4) {
      *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0);
      *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1);
      *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2);
      *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
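      // Partial tile (nc < 4): extract each row's 32-bit lane into a scalar,
      // then write the remaining 2 and/or 1 bytes from its low bits.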
      uint32_t vout0 = wasm_i32x4_extract_lane(vout, 0);
      uint32_t vout1 = wasm_i32x4_extract_lane(vout, 1);
      uint32_t vout2 = wasm_i32x4_extract_lane(vout, 2);
      uint32_t vout3 = wasm_i32x4_extract_lane(vout, 3);
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) vout0;
        vout0 >>= 16;
        c0 += 2;
        *((uint16_t*) c1) = (uint16_t) vout1;
        vout1 >>= 16;
        c1 += 2;
        *((uint16_t*) c2) = (uint16_t) vout2;
        vout2 >>= 16;
        c2 += 2;
        *((uint16_t*) c3) = (uint16_t) vout3;
        vout3 >>= 16;
        c3 += 2;
      }
      if (nc & 1) {
        *c0 = (uint8_t) vout0;
        *c1 = (uint8_t) vout1;
        *c2 = (uint8_t) vout2;
        *c3 = (uint8_t) vout3;
      }

      nc = 0;
    }
  } while (nc != 0);
}