// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qu8_gemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

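  // Set up the 4 input (A) and output (C) row pointers. For partial tiles
  // (mr < 4) the excess rows alias the previous row, so they are computed
  // redundantly but never touch memory outside the caller's buffers.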
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

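  // Round kc up to a multiple of 8 bytes: the K loop below consumes 8 bytes
  // of A per row per iteration, and the XNN_OOB_READS annotation permits
  // reading past the logical end of each row.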
  kc = round_up_po2(kc, 8 * sizeof(uint8_t));
  do {
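    // Initialize the 4x4 int32 accumulators from the bias values packed at
    // the start of the weights blob.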
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

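    // QU8 weights are stored unsigned; the kernel zero point is subtracted
    // from each widened weight below so the dot products run on signed
    // 16-bit values.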
    const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
    size_t k = kc;
    do {
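      // Load 8 input bytes per row and zero-extend them to 16-bit lanes.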
      v128_t vxa0 = wasm_u16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      v128_t vxa1 = wasm_u16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      v128_t vxa2 = wasm_u16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      v128_t vxa3 = wasm_u16x8_load8x8((const v128_t*) a3);
      a3 += 8;

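      // ld128: load 16 weight bytes in one shot, then widen the low and high
      // halves and subtract the kernel zero point to get two signed 16-bit
      // weight vectors.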
      const v128_t vb01 = wasm_v128_load(w);
      const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
      const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);

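      // dot16x2 multiply-accumulate: wasm_i32x4_dot_i16x8 multiplies
      // corresponding 16-bit lanes and sums adjacent products into 32-bit
      // lanes. The s4 shuffle (1, 2, 3, 4) rotates the activation vector by
      // one 32-bit lane (one pair of 16-bit values), so the next weight
      // vector is applied at the next K offset.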
      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);

      const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
      const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
      const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));

      w = (const uint8_t*) w + 32;
      k -= 8 * sizeof(uint8_t);
    } while (k != 0);

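    // fp32 requantization: convert the int32 accumulators to float.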
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

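    // Multiply by the requantization scale from the params.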
    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

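    // Magic-bias rounding: adding the magic bias leaves the rounded integer
    // result in the low mantissa bits, so the integer max below clamps to the
    // output min and the integer subtraction applies the output zero point,
    // both operating directly on the float bit patterns.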
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

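    // Narrow the int32 results to int16, then to uint8. Each 32-bit lane of
    // vout holds the 4 output bytes of one row: lane 0 = row 0, ..., lane 3
    // = row 3.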
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

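    // Clamp to the output max; the output min was already enforced by the
    // magic_min step, and the unsigned narrowing saturates at 0 and 255.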
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_u8x16_min(vout, voutput_max);

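    // Main path: store 4 output bytes per row. Extracting a f32x4 lane and
    // storing it through a float pointer reinterprets the row's 4 packed
    // bytes as a single 32-bit store; no numeric conversion is intended.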
    if (nc >= 4) {
      *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0);
      *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1);
      *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2);
      *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
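      // Remainder path (nc < 4): extract each row's packed bytes as a uint32
      // and store 2 and/or 1 bytes depending on the remaining column count.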
      uint32_t vout0 = wasm_i32x4_extract_lane(vout, 0);
      uint32_t vout1 = wasm_i32x4_extract_lane(vout, 1);
      uint32_t vout2 = wasm_i32x4_extract_lane(vout, 2);
      uint32_t vout3 = wasm_i32x4_extract_lane(vout, 3);
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) vout0;
        vout0 >>= 16;
        c0 += 2;
        *((uint16_t*) c1) = (uint16_t) vout1;
        vout1 >>= 16;
        c1 += 2;
        *((uint16_t*) c2) = (uint16_t) vout2;
        vout2 >>= 16;
        c2 += 2;
        *((uint16_t*) c3) = (uint16_t) vout3;
        vout3 >>= 16;
        c3 += 2;
      }
      if (nc & 1) {
        *c0 = (uint8_t) vout0;
        *c1 = (uint8_t) vout1;
        *c2 = (uint8_t) vout2;
        *c3 = (uint8_t) vout3;
      }

      nc = 0;
    }
  } while (nc != 0);
}