// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/MRx2-neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/igemm.h>

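// 6x2 f32 IGEMM microkernel: computes a 6-row x 2-column tile of the output
// with min/max clamping, using NEON FMA and 64-bit loads. The input rows are
// addressed indirectly through the pointer array `a` (indirection buffer).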
void xnn_f32_igemm_minmax_ukernel_6x2__neonfma_lane_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

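  // Set up one output row pointer per row of the tile. When mr < 6, the
  // pointers for out-of-range rows are clamped to the previous row, so the
  // extra rows harmlessly rewrite an in-range row's data.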
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

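  // Outer loop: produce the output two columns at a time until all nc
  // columns are written.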
  do {
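    // Initialize all 6 row accumulators with the shared 2-element bias that
    // leads each column block of the packed weights w.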
    float32x2_t vacc0x01 = vld1_f32(w); w += 2;
    float32x2_t vacc1x01 = vacc0x01;
    float32x2_t vacc2x01 = vacc0x01;
    float32x2_t vacc3x01 = vacc0x01;
    float32x2_t vacc4x01 = vacc0x01;
    float32x2_t vacc5x01 = vacc0x01;

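    // Walk the indirection buffer: each iteration consumes 6 input row
    // pointers. Any pointer other than the `zero` buffer (used for padding)
    // is rebased by a_offset.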
    size_t p = ks;
    do {
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

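      // Main loop: process the reduction dimension two floats at a time
      // (one 64-bit load per input row, two 64-bit loads of weights).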
      size_t k = kc;
      for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
        const float32x2_t va0 = vld1_f32(a0); a0 += 2;
        const float32x2_t va1 = vld1_f32(a1); a1 += 2;
        const float32x2_t va2 = vld1_f32(a2); a2 += 2;
        const float32x2_t va3 = vld1_f32(a3); a3 += 2;
        const float32x2_t va4 = vld1_f32(a4); a4 += 2;
        const float32x2_t va5 = vld1_f32(a5); a5 += 2;

        const float32x2_t vb01c0 = vld1_f32(w); w += 2;

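        // On AArch64, FMA against a vector lane is a single instruction; on
        // 32-bit ARM with NEON FMA, the lane is broadcast first with
        // vdup_lane_f32 and a plain vfma_f32 is used instead.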
        #if XNN_ARCH_ARM64
          vacc0x01 = vfma_lane_f32(vacc0x01, vb01c0, va0, 0);
          vacc1x01 = vfma_lane_f32(vacc1x01, vb01c0, va1, 0);
          vacc2x01 = vfma_lane_f32(vacc2x01, vb01c0, va2, 0);
          vacc3x01 = vfma_lane_f32(vacc3x01, vb01c0, va3, 0);
          vacc4x01 = vfma_lane_f32(vacc4x01, vb01c0, va4, 0);
          vacc5x01 = vfma_lane_f32(vacc5x01, vb01c0, va5, 0);
        #else
          const float32x2_t va0c0 = vdup_lane_f32(va0, 0);
          const float32x2_t va1c0 = vdup_lane_f32(va1, 0);
          const float32x2_t va2c0 = vdup_lane_f32(va2, 0);
          const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
          const float32x2_t va4c0 = vdup_lane_f32(va4, 0);
          const float32x2_t va5c0 = vdup_lane_f32(va5, 0);
          vacc0x01 = vfma_f32(vacc0x01, va0c0, vb01c0);
          vacc1x01 = vfma_f32(vacc1x01, va1c0, vb01c0);
          vacc2x01 = vfma_f32(vacc2x01, va2c0, vb01c0);
          vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
          vacc4x01 = vfma_f32(vacc4x01, va4c0, vb01c0);
          vacc5x01 = vfma_f32(vacc5x01, va5c0, vb01c0);
        #endif
        const float32x2_t vb01c1 = vld1_f32(w); w += 2;

        #if XNN_ARCH_ARM64
          vacc0x01 = vfma_lane_f32(vacc0x01, vb01c1, va0, 1);
          vacc1x01 = vfma_lane_f32(vacc1x01, vb01c1, va1, 1);
          vacc2x01 = vfma_lane_f32(vacc2x01, vb01c1, va2, 1);
          vacc3x01 = vfma_lane_f32(vacc3x01, vb01c1, va3, 1);
          vacc4x01 = vfma_lane_f32(vacc4x01, vb01c1, va4, 1);
          vacc5x01 = vfma_lane_f32(vacc5x01, vb01c1, va5, 1);
        #else
          const float32x2_t va0c1 = vdup_lane_f32(va0, 1);
          const float32x2_t va1c1 = vdup_lane_f32(va1, 1);
          const float32x2_t va2c1 = vdup_lane_f32(va2, 1);
          const float32x2_t va3c1 = vdup_lane_f32(va3, 1);
          const float32x2_t va4c1 = vdup_lane_f32(va4, 1);
          const float32x2_t va5c1 = vdup_lane_f32(va5, 1);
          vacc0x01 = vfma_f32(vacc0x01, va0c1, vb01c1);
          vacc1x01 = vfma_f32(vacc1x01, va1c1, vb01c1);
          vacc2x01 = vfma_f32(vacc2x01, va2c1, vb01c1);
          vacc3x01 = vfma_f32(vacc3x01, va3c1, vb01c1);
          vacc4x01 = vfma_f32(vacc4x01, va4c1, vb01c1);
          vacc5x01 = vfma_f32(vacc5x01, va5c1, vb01c1);
        #endif
      }
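      // Remainder: kc was odd in units of floats, so one element per row is
      // left; broadcast it and do a single FMA against the last weight pair.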
      if XNN_UNLIKELY(k != 0) {
        const float32x2_t va0 = vld1_dup_f32(a0);
        const float32x2_t va1 = vld1_dup_f32(a1);
        const float32x2_t va2 = vld1_dup_f32(a2);
        const float32x2_t va3 = vld1_dup_f32(a3);
        const float32x2_t va4 = vld1_dup_f32(a4);
        const float32x2_t va5 = vld1_dup_f32(a5);

        const float32x2_t vb01 = vld1_f32(w); w += 2;

        vacc0x01 = vfma_f32(vacc0x01, va0, vb01);
        vacc1x01 = vfma_f32(vacc1x01, va1, vb01);
        vacc2x01 = vfma_f32(vacc2x01, va2, vb01);
        vacc3x01 = vfma_f32(vacc3x01, va3, vb01);
        vacc4x01 = vfma_f32(vacc4x01, va4, vb01);
        vacc5x01 = vfma_f32(vacc5x01, va5, vb01);
      }
      p -= 6 * sizeof(void*);
    } while (p != 0);

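    // Clamp the accumulators to [params->scalar.min, params->scalar.max].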
    const float32x2_t vmax = vld1_dup_f32(&params->scalar.max);
    vacc0x01 = vmin_f32(vacc0x01, vmax);
    vacc1x01 = vmin_f32(vacc1x01, vmax);
    vacc2x01 = vmin_f32(vacc2x01, vmax);
    vacc3x01 = vmin_f32(vacc3x01, vmax);
    vacc4x01 = vmin_f32(vacc4x01, vmax);
    vacc5x01 = vmin_f32(vacc5x01, vmax);

    const float32x2_t vmin = vld1_dup_f32(&params->scalar.min);
    vacc0x01 = vmax_f32(vacc0x01, vmin);
    vacc1x01 = vmax_f32(vacc1x01, vmin);
    vacc2x01 = vmax_f32(vacc2x01, vmin);
    vacc3x01 = vmax_f32(vacc3x01, vmin);
    vacc4x01 = vmax_f32(vacc4x01, vmin);
    vacc5x01 = vmax_f32(vacc5x01, vmin);

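    // Store the tile. The common case writes both columns for every row and
    // advances the column pointers by cn_stride; the nc == 1 tail writes a
    // single lane per row.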
    if XNN_LIKELY(nc >= 2) {
      vst1_f32(c5, vacc5x01);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1_f32(c4, vacc4x01);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1_f32(c3, vacc3x01);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1_f32(c2, vacc2x01);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1_f32(c1, vacc1x01);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1_f32(c0, vacc0x01);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

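      // Rewind the indirection pointer so the next column block re-reads the
      // same ks entries.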
      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 2;
    } else {
      assert(nc == 1);
      vst1_lane_f32(c5, vacc5x01, 0);
      vst1_lane_f32(c4, vacc4x01, 0);
      vst1_lane_f32(c3, vacc3x01, 0);
      vst1_lane_f32(c2, vacc2x01, 0);
      vst1_lane_f32(c1, vacc1x01, 0);
      vst1_lane_f32(c0, vacc0x01, 0);

      nc = 0;
    }
  } while (nc != 0);
}
229