// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>

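// Computes f[i] = exp(input[i] - *max) for every element, stores the results
// to `output`, and accumulates their sum into `*sum` ("radd store exp minus
// max"): the core of a numerically stable softmax. The exp approximation uses
// round-to-nearest range reduction with a two-term log(2) (rr2) and a
// degree-5 polynomial (p5); each main-loop iteration handles 20 floats (x20)
// with two partial-sum accumulators (acc2).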
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc2(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements % sizeof(float) == 0);

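  // Broadcast the subtracted maximum and the range-reduction/polynomial
  // constants from the params structure into NEON registers.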
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
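  // Main loop: 20 elements (five float32x4_t vectors) per iteration. Partial
  // sums alternate between vacc0 and vacc1 to shorten the floating-point
  // addition dependency chain.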
  for (; elements >= 20 * sizeof(float); elements -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

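    // Subtract the maximum input for numerical stability: with *max the
    // largest input, x := i - max <= 0, so exp(x) <= 1 cannot overflow.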
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

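    // Compute n := round(x / log(2)) with the "magic bias" trick: adding a
    // large constant forces rounding in the mantissa, leaving the rounded
    // integer in the low bits of vn.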
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);

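    // Build the scale s := 2**n: the low bits of the still-biased vn hold n
    // plus the float exponent bias, so shifting them left by 23 moves them
    // into the exponent field and yields 2**n when reinterpreted as a float.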
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));

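    // Subtract the magic bias to recover n as an integer-valued float.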
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

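    // Compute the reduced argument t := x - n * log(2). log(2) is subtracted
    // in two parts ("rr2": a high and a low half), Cody-Waite style, so the
    // reduction retains extra precision.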
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

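    // Evaluate the degree-5 polynomial approximation of exp(t) on the reduced
    // range using Horner's scheme:
    //   p := c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))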
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
    float32x4_t vpGHIJ = vmlaq_f32(vc4, vc5, vtGHIJ);

    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);

    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);

    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc1, vpGHIJ, vtGHIJ);

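    // Reconstruct exp(x) = s * (1 + t * p(t)): scale t by s first, then fuse
    // the final multiply-add so that f := s + (t * s) * p.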
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);

    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);

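    // Flush outputs to zero where x < denorm_cutoff: there exp(x) would
    // underflow to a denormal and the approximation is no longer valid.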
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc1 = vaddq_f32(vacc1, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }
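  // Combine the two partial-sum accumulators.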
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
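  // Process the remaining full vectors of 4 elements; the computation mirrors
  // one vector's worth of the main loop.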
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
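  // Reduce the vector accumulator: to a scalar on AArch64, or to a two-lane
  // partial sum on AArch32 (folded to a scalar at the very end).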
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
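  // Handle the last 1-3 elements. The full-vector load may read past the end
  // of the input buffer; this is why the kernel is declared XNN_OOB_READS.
  // Only the valid lanes are stored and accumulated.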
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

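    // Store and accumulate only the valid lanes: two lanes at once if at
    // least 2 elements remain, then one final lane if the count is odd.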
    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;

      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);

      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
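        // AArch32: shift vf_lo's lane 0 into lane 1 so the concluding
        // pairwise add counts this value exactly once.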
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
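  // Write out the final scalar sum; on AArch32, fold the two partial lanes
  // with a pairwise add first.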
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
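
// Example invocation (a minimal sketch; xnn_init_f32_expminus_neon_rr2_p5_params
// is assumed to be the matching parameter-initialization helper elsewhere in the
// XNNPACK tree, and its name may differ across versions; batch/input/max_value/
// output are hypothetical caller variables):
//
//   union xnn_f32_expminus_params params;
//   xnn_init_f32_expminus_neon_rr2_p5_params(&params);
//   float sum;
//   xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc2(
//       batch * sizeof(float),  // note: the element count is passed in bytes
//       input, &max_value, output, &sum, &params);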