// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qc8_gemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

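  // The inner loop consumes A in 8-byte chunks, so KC is rounded up to a
  // multiple of 8; this assumes the packed weights are zero-padded to match,
  // and XNN_OOB_READS permits the corresponding over-read of A.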
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
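    // Initialize all 4 row accumulators with the per-channel bias values that
    // lead each group of 4 output channels in the packed weights w.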
63 v128_t vacc0x0123 = wasm_v128_load(w);
64 v128_t vacc1x0123 = vacc0x0123;
65 v128_t vacc2x0123 = vacc0x0123;
66 v128_t vacc3x0123 = vacc0x0123;
67 w = (const void*) ((const int32_t*) w + 4);
68
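    // Process K in chunks of 8. Each B load pairs 2 consecutive K values with
    // each of the 4 output channels; rotating the sign-extended A vector left
    // by one 32-bit lane (two int16 values) between dot products lines it up
    // with the next B block -- the "c2s4" packing scheme.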
    size_t k = kc;
    do {
      v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;

      const v128_t vxb0 = wasm_i16x8_load8x8(w);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
      vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
      vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
      vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
      vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
      const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));

      w = (const int8_t*) w + 32;
      k -= 8 * sizeof(int8_t);
    } while (k != 0);

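    // Requantization with fp32 intermediates: convert the int32 accumulators
    // to float.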
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

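    // Multiply by the per-channel (qc8) scales, which follow each group's
    // weights in the packed buffer w.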
    const v128_t vscale0123 = wasm_v128_load(w);
    w = (const float*) w + 4;
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale0123);

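    // Adding the magic bias rounds each value to the nearest integer and
    // leaves that integer in the low mantissa bits, so the results can be
    // manipulated below as int32 bit patterns.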
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

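    // Clamp at the output minimum while the values are still biased: the
    // biased floats are all positive, so a signed integer max on their bit
    // patterns orders them the same way a float comparison would.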
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

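    // Subtracting (magic bias bit pattern - output zero point) recovers the
    // rounded int32 result with the output zero point already added.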
    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

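    // Narrow to int16 and then to int8 with signed saturation, interleaving
    // the 4 rows into a single vector: row r occupies 32-bit lane r of vout.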
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

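    // The output minimum was already enforced via vmagic_min before
    // narrowing, so only the maximum needs to be clamped here.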
    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
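      // Main path: each row's 4 output bytes sit in one 32-bit lane of vout,
      // so extracting the lane as f32 and storing it as float writes all 4
      // bytes with a single 32-bit store.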
      *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0);
      *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1);
      *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2);
      *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
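      // Remainder path: pull each row's 4 bytes out as a 32-bit integer and
      // store 2 bytes and/or 1 byte of it, depending on nc.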
      uint32_t vout0 = wasm_i32x4_extract_lane(vout, 0);
      uint32_t vout1 = wasm_i32x4_extract_lane(vout, 1);
      uint32_t vout2 = wasm_i32x4_extract_lane(vout, 2);
      uint32_t vout3 = wasm_i32x4_extract_lane(vout, 3);
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) vout0;
        vout0 >>= 16;
        c0 += 2;
        *((uint16_t*) c1) = (uint16_t) vout1;
        vout1 >>= 16;
        c1 += 2;
        *((uint16_t*) c2) = (uint16_t) vout2;
        vout2 >>= 16;
        c2 += 2;
        *((uint16_t*) c3) = (uint16_t) vout3;
        vout3 >>= 16;
        c3 += 2;
      }
      if (nc & 1) {
        *c0 = (int8_t) vout0;
        *c1 = (int8_t) vout1;
        *c2 = (int8_t) vout2;
        *c3 = (int8_t) vout3;
      }

      nc = 0;
    }
  } while (nc != 0);
}