// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>

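// xnn_table_exp2_k_over_64[k] holds 2**(k/64) in single precision; the kernel
// looks these values up and patches in an integer exponent to reconstruct exp().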
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];

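// Computes f[i] = exp(input[i] - *max) for each element, stores f to output,
// and accumulates the sum of all f[i] into *sum -- the softmax building block
// the "raddstoreexpminusmax" name describes (reduce-add, store, exp(x - max)).
// Variant decoding, per XNNPACK naming conventions: rr1 = single-step range
// reduction, lut64 = 64-entry exp2 lookup table, p2 = degree-2 polynomial,
// x20 = main loop unrolled to 20 elements.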
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements % sizeof(float) == 0);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
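  // Main loop: process 20 elements (five 4-lane NEON vectors) per iteration.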
  for (; elements >= 20 * sizeof(float); elements -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

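    // n := round(x * 64/ln 2) via the magic-bias trick: vlog2e and vmagic_bias
    // are chosen so the rounded integer lands in the low mantissa bits of vn.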
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);

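    // Split n = 64*e + k: clear the 6 table-index bits and shift the quotient
    // e left by 17 so it lands in the float exponent field (bit 23, with the
    // low 6 bits already consumed: 23 - 6 = 17).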
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);

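    // Extract the table indices k = n & 0x3F; lanes are paired into 64-bit
    // values so two 32-bit indices reach scalar registers per extraction.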
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);

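    // Gather 2**(k/64) from the table: vld1_dup fills the even lane, then
    // vld1_lane overwrites the odd lane with the second index's entry.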
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);

    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

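    // Reconstruct s = 2**(n/64) by adding the exponent bits e to the bit
    // pattern of the table value (an integer add on the float representation).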
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));

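    // Subtract the magic bias to recover n as a float for the residual step.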
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

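    // Range-reduction residual, done in one FMA ("rr1"): t = x + n * vminus_ln2,
    // i.e. t = x - n * (ln 2 / 64) with the standard LUT-64 constant.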
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
    float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);

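    // Degree-2 polynomial ("p2"): p = t + c2*t*t approximates exp(t) - 1 on
    // the reduced interval |t| <= ln 2 / 128.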
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);

    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vfmaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);

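    // f = s + s*p = s * (1 + p), the final exp(x) approximation.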
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);

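    // Flush f to zero wherever x < denorm_cutoff, where the bit-manipulation
    // scheme above would otherwise produce incorrect denormal results.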
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }

  float32x4_t vacc = vacc0;
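  // Remainder loop: 4 elements at a time, same algorithm as the main loop.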
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);

    float32x4_t vf = vfmaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
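  // Horizontally reduce the vector accumulator: AArch64 sums all four lanes
  // at once; AArch32 folds the high half into the low half here and finishes
  // with a pairwise add before the final store.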
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
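  // Scalar tail: 1-3 leftover elements. XNN_OOB_READS marks that the full
  // 4-element load below may read past the buffer; only valid lanes are
  // stored and accumulated.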
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);

    float32x4_t vf = vfmaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
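    // Store two lanes if bit 1 of the remaining element count is set, then
    // one more lane if bit 0 is set.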
    if (elements & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;

      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);

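      // On AArch32 the single stored lane is shifted into the upper 32 bits
      // of the pair so the final pairwise add counts it exactly once.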
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
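  // Publish the final scalar sum (the pairwise add folds the two AArch32 lanes).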
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
