// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c8__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (2 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

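  // Round kc up to a multiple of 8 bytes: inputs and packed weights advance in groups of 8 (the "c8" layout).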
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
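  // For mr == 1, alias the second output row to the first so its stores become harmless duplicates.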
  if XNN_UNPREDICTABLE(mr != 2) {
    c1 = c0;
  }

  do {
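    // Initialize the 16 per-channel accumulators from the bias values packed at the
    // start of w; row 1 starts from the same biases.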
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc1x8 = vacc0x8;
    int32x4_t vacc1x9 = vacc0x9;
    int32x4_t vacc1x10 = vacc0x10;
    int32x4_t vacc1x11 = vacc0x11;
    int32x4_t vacc1x12 = vacc0x12;
    int32x4_t vacc1x13 = vacc0x13;
    int32x4_t vacc1x14 = vacc0x14;
    int32x4_t vacc1x15 = vacc0x15;

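    // Walk the indirection buffer: each iteration consumes 2 input-row pointers.
    // Pointers equal to the zero buffer are left unadjusted by a_offset.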
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      a += 2;

      size_t k = kc;
      // Main loop, 2x partially unrolled: load 16 bytes per input row at a time and
      // fuse the second 8-byte block into each product with VMLAL.
      while (k >= 16 * sizeof(int8_t)) {
        const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;

        const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb8x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb9x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb10x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb11x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb12x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb13x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb14x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb15x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

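        // For each of the 16 output channels: multiply the first 8-byte block (VMULL),
        // fuse the second block (VMLAL), then pairwise-accumulate the 16-bit products
        // into the 32-bit accumulators (VPADAL).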
        const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
        int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0);
        vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
        vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
        int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0);
        vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
        vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
        int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0);
        vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
        vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
        int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0);
        vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
        vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
        int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0);
        vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
        vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
        int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
        vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
        vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
        int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0);
        vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
        vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
        int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0);
        vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
        vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        const int8x8_t vb8x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x8 = vmull_s8(vb8x0, va0x0);
        int16x8_t vprod1x8 = vmull_s8(vb8x0, va1x0);
        vprod0x8 = vmlal_s8(vprod0x8, vb8x1, va0x1);
        vprod1x8 = vmlal_s8(vprod1x8, vb8x1, va1x1);
        vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
        vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);
        const int8x8_t vb9x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x9 = vmull_s8(vb9x0, va0x0);
        int16x8_t vprod1x9 = vmull_s8(vb9x0, va1x0);
        vprod0x9 = vmlal_s8(vprod0x9, vb9x1, va0x1);
        vprod1x9 = vmlal_s8(vprod1x9, vb9x1, va1x1);
        vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
        vacc1x9 = vpadalq_s16(vacc1x9, vprod1x9);
        const int8x8_t vb10x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x10 = vmull_s8(vb10x0, va0x0);
        int16x8_t vprod1x10 = vmull_s8(vb10x0, va1x0);
        vprod0x10 = vmlal_s8(vprod0x10, vb10x1, va0x1);
        vprod1x10 = vmlal_s8(vprod1x10, vb10x1, va1x1);
        vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
        vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
        const int8x8_t vb11x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x11 = vmull_s8(vb11x0, va0x0);
        int16x8_t vprod1x11 = vmull_s8(vb11x0, va1x0);
        vprod0x11 = vmlal_s8(vprod0x11, vb11x1, va0x1);
        vprod1x11 = vmlal_s8(vprod1x11, vb11x1, va1x1);
        vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
        vacc1x11 = vpadalq_s16(vacc1x11, vprod1x11);
        const int8x8_t vb12x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x12 = vmull_s8(vb12x0, va0x0);
        int16x8_t vprod1x12 = vmull_s8(vb12x0, va1x0);
        vprod0x12 = vmlal_s8(vprod0x12, vb12x1, va0x1);
        vprod1x12 = vmlal_s8(vprod1x12, vb12x1, va1x1);
        vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
        vacc1x12 = vpadalq_s16(vacc1x12, vprod1x12);
        const int8x8_t vb13x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x13 = vmull_s8(vb13x0, va0x0);
        int16x8_t vprod1x13 = vmull_s8(vb13x0, va1x0);
        vprod0x13 = vmlal_s8(vprod0x13, vb13x1, va0x1);
        vprod1x13 = vmlal_s8(vprod1x13, vb13x1, va1x1);
        vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
        vacc1x13 = vpadalq_s16(vacc1x13, vprod1x13);
        const int8x8_t vb14x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x14 = vmull_s8(vb14x0, va0x0);
        int16x8_t vprod1x14 = vmull_s8(vb14x0, va1x0);
        vprod0x14 = vmlal_s8(vprod0x14, vb14x1, va0x1);
        vprod1x14 = vmlal_s8(vprod1x14, vb14x1, va1x1);
        vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
        vacc1x14 = vpadalq_s16(vacc1x14, vprod1x14);
        const int8x8_t vb15x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        int16x8_t vprod0x15 = vmull_s8(vb15x0, va0x0);
        int16x8_t vprod1x15 = vmull_s8(vb15x0, va1x0);
        vprod0x15 = vmlal_s8(vprod0x15, vb15x1, va0x1);
        vprod1x15 = vmlal_s8(vprod1x15, vb15x1, va1x1);
        vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
        vacc1x15 = vpadalq_s16(vacc1x15, vprod1x15);

        k -= 16 * sizeof(int8_t);
      }

      // Remainder: handle the last 8 bytes of each row with VMULL only.
      if (k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;

        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        const int8x8_t vb8 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x8 = vmull_s8(vb8, va0);
        const int16x8_t vprod1x8 = vmull_s8(vb8, va1);
        vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
        vacc1x8 = vpadalq_s16(vacc1x8, vprod1x8);
        const int8x8_t vb9 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x9 = vmull_s8(vb9, va0);
        const int16x8_t vprod1x9 = vmull_s8(vb9, va1);
        vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
        vacc1x9 = vpadalq_s16(vacc1x9, vprod1x9);
        const int8x8_t vb10 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x10 = vmull_s8(vb10, va0);
        const int16x8_t vprod1x10 = vmull_s8(vb10, va1);
        vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
        vacc1x10 = vpadalq_s16(vacc1x10, vprod1x10);
        const int8x8_t vb11 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x11 = vmull_s8(vb11, va0);
        const int16x8_t vprod1x11 = vmull_s8(vb11, va1);
        vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
        vacc1x11 = vpadalq_s16(vacc1x11, vprod1x11);
        const int8x8_t vb12 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x12 = vmull_s8(vb12, va0);
        const int16x8_t vprod1x12 = vmull_s8(vb12, va1);
        vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
        vacc1x12 = vpadalq_s16(vacc1x12, vprod1x12);
        const int8x8_t vb13 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x13 = vmull_s8(vb13, va0);
        const int16x8_t vprod1x13 = vmull_s8(vb13, va1);
        vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
        vacc1x13 = vpadalq_s16(vacc1x13, vprod1x13);
        const int8x8_t vb14 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x14 = vmull_s8(vb14, va0);
        const int16x8_t vprod1x14 = vmull_s8(vb14, va1);
        vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
        vacc1x14 = vpadalq_s16(vacc1x14, vprod1x14);
        const int8x8_t vb15 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x15 = vmull_s8(vb15, va0);
        const int16x8_t vprod1x15 = vmull_s8(vb15, va1);
        vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
        vacc1x15 = vpadalq_s16(vacc1x15, vprod1x15);

        k -= 8 * sizeof(int8_t);
      }

      p -= 2 * sizeof(void*);
    } while (p != 0);

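    // Each vacc now holds 4 partial sums for a single output channel. Pairwise-add them
    // down to one lane per channel and gather 4 channels per vector: vpaddq_s32 on
    // AArch64, vadd/vpadd on AArch32.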
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
    const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
    const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
    const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum1x89 = vpaddq_s32(vacc1x8, vacc1x9);
    const int32x4_t vsum1xAB = vpaddq_s32(vacc1x10, vacc1x11);
    const int32x4_t vsum1xCD = vpaddq_s32(vacc1x12, vacc1x13);
    const int32x4_t vsum1xEF = vpaddq_s32(vacc1x14, vacc1x15);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
    int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc1x89AB = vpaddq_s32(vsum1x89, vsum1xAB);
    int32x4_t vacc1xCDEF = vpaddq_s32(vsum1xCD, vsum1xEF);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
    const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
    const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
    const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
    const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
    const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
    const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
    const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
    const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
    const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
    const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
    const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vpsum1x8 = vadd_s32(vget_low_s32(vacc1x8), vget_high_s32(vacc1x8));
    const int32x2_t vpsum1x9 = vadd_s32(vget_low_s32(vacc1x9), vget_high_s32(vacc1x9));
    const int32x2_t vpsum1xA = vadd_s32(vget_low_s32(vacc1x10), vget_high_s32(vacc1x10));
    const int32x2_t vpsum1xB = vadd_s32(vget_low_s32(vacc1x11), vget_high_s32(vacc1x11));
    const int32x2_t vsum1x89 = vpadd_s32(vpsum1x8, vpsum1x9);
    const int32x2_t vsum1xAB = vpadd_s32(vpsum1xA, vpsum1xB);
    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
    const int32x2_t vpsum1xC = vadd_s32(vget_low_s32(vacc1x12), vget_high_s32(vacc1x12));
    const int32x2_t vpsum1xD = vadd_s32(vget_low_s32(vacc1x13), vget_high_s32(vacc1x13));
    const int32x2_t vpsum1xE = vadd_s32(vget_low_s32(vacc1x14), vget_high_s32(vacc1x14));
    const int32x2_t vpsum1xF = vadd_s32(vget_low_s32(vacc1x15), vget_high_s32(vacc1x15));
    const int32x2_t vsum1xCD = vpadd_s32(vpsum1xC, vpsum1xD);
    const int32x2_t vsum1xEF = vpadd_s32(vpsum1xE, vpsum1xF);
    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
#endif

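    // Requantize with the rndnu scheme: saturating pre-shift, doubling high-half
    // multiply (vqdmulhq_s32), then rounding post-shift.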
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vqshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vqshlq_s32(vacc1xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);

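    // Narrow to 16 bits with saturation and add the output zero point.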
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc1x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc1x89ABCDEF = vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
#endif

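    // Clamp to the requested output activation range.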
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);

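    // Full 16-channel case: store both rows, advance the output pointers, and rewind
    // the indirection pointer by ks for the next 16-column block.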
    if (nc >= 16) {
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
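      // Partial tail: interleave the two rows' low halves into one vector, then store
      // 8, 4, 2, and finally 1 channel(s) per row, shifting the remaining bytes down
      // after each store.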
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}