// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>

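// 64-entry lookup table holding 2**(k/64) for k = 0..63.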
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];

void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x8_acc2(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements % sizeof(float) == 0);

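  // Broadcast the subtracted maximum and the rr2-lut64-p2 constants: the
  // log2(e) scale factor, the magic rounding bias, the high/low split of
  // -log(2) used by the two-step range reduction ("rr2"), the degree-2
  // polynomial coefficient ("p2"), and the denormal cutoff.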
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

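  // Two partial sums ("acc2") break the dependency chain between successive
  // vector additions.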
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
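  // Main loop: process 8 elements (two vectors) per iteration.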
  for (; elements >= 8 * sizeof(float); elements -= 8 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;

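    // Subtract the maximum so that x := i - max <= 0 and exp(x) cannot overflow.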
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);

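    // n := round(x * 64 / log(2)): adding the large magic bias rounds the
    // scaled value and pins the integer result in the low mantissa bits of vn.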
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);

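    // e := n >> 6 is the power-of-two exponent: clearing the 6 index bits and
    // shifting left by 17 moves e into the floating-point exponent field
    // (bit 23 = bit 6 + 17).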
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);

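    // k := n & 0x3F is the table index; each 64-bit lane packs the indices of
    // two adjacent elements.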
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);

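    // Gather 2**(k/64) from the table, two lanes at a time: a duplicated load
    // for lane 0 followed by a lane load for lane 1.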
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);

    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);

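    // s := 2**(n/64) = 2**e * 2**(k/64), reconstructed by adding e to the
    // exponent bits of the table value.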
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));

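    // Subtract the magic bias to recover the rounded n as a float.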
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);

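    // t := x - n * log(2) / 64, computed with the high/low (Cody-Waite style)
    // split of the constant so the reduction loses no precision.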
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);

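    // Degree-2 polynomial: p := t + c2 * t**2 approximates expm1(t) on the
    // reduced range.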
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);

    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);

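    // f := s + s * p = s * (1 + p) ~= s * exp(t) = exp(x).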
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);

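    // For x below the cutoff, exp(x) would be denormal; force those lanes to
    // zero with a bitwise clear, then store the results.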
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;

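    // Accumulate the results, alternating between the two partial sums.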
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
  }
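  // Fold the second partial sum into the first.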
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
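  // Process the remaining full vectors of 4 elements: the same computation as
  // the main loop, with a single accumulator.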
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
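  // Start the horizontal reduction: AArch64 can reduce all four lanes at once;
  // ARMv7 NEON folds the vector into a two-lane partial sum first.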
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
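  // Handle the last 1-3 elements. A full vector is loaded (the kernel is
  // annotated XNN_OOB_READS), but only the valid lanes are stored and summed.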
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

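    // Store and accumulate only the valid lanes of the result.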
    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;

      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);

      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
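        // Shift the valid lane into the upper half of the 64-bit pair: the
        // stale upper lane is discarded and the value is counted exactly once
        // by the final pairwise add.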
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
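  // Store the final sum: already a scalar on AArch64; one last pairwise add
  // on ARMv7.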
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}