// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>

void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2s4__neonv8_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
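  // Loop over blocks of up to 8 output channels (columns of C).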
  do {
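    // Initialize the accumulators with the bias values stored at the start of the packed weights.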
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
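      // Fetch the next input pointer from the indirection buffer; the shared zero buffer is used as-is, without applying a_offset.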
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = kc;
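      // Main loop: consume 16 bytes of K per iteration. Products are formed with paired VMULL/VMLAL
      // (c2s4 layout: 2-element column groups, with the activation vector rotated by 2 bytes between
      // groups) and accumulated into the 32-bit lanes via VPADAL.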
      while (k >= 16 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);

        k -= 16 * sizeof(int8_t);
      }
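      // Remainder: kc is rounded up to a multiple of 8, so at most one extra 8-byte pass (without the VMLAL half) is needed.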
      if (k != 0) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);

      }

      p -= 1 * sizeof(void*);
    } while (p != 0);

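    // Requantize: convert the int32 accumulators to float, apply the scale, and round back to int32
    // (VCVTN rounds to nearest, ties to even, on NEON v8).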
    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);

    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);

    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);

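    // Add the output zero point and narrow to 8 bits with saturation.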
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);

    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);

    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif

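    // Clamp to the output min/max range.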
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);

    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);

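    // Write out a full 8-column tile, or store a partial tile 4/2/1 elements at a time.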
    if (nc >= 8) {
      vst1_s8(c0 + 0, vout0x01234567);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c0, vout0x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
