// Auto-generated file. Do not edit!
// Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>


void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80_acc5(
    size_t batch,
    const void* input,
    const void* max,
    void* output,
    void* sum,
    const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch % sizeof(__fp16) == 0);

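  // ExpMinusMax kernel: computes f[i] = exp(input[i] - max) for each half-precision element,
  // stores the results, and accumulates their sum. Per the kernel name: "rr2" = two-step
  // Cody-Waite range reduction, "p2" = degree-2 polynomial, "x80" = 80 elements per
  // main-loop iteration, "acc5" = 5 independent accumulators.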
  const float16x8_t vi_max = vld1q_dup_f16(max);
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.denorm_cutoff));

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
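  // Five independent accumulators shorten the floating-point dependency chain and reduce the
  // rounding error of a single half-precision running sum.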
  float16x8_t vacc0 = vmovq_n_f16(0.0f);
  float16x8_t vacc1 = vmovq_n_f16(0.0f);
  float16x8_t vacc2 = vmovq_n_f16(0.0f);
  float16x8_t vacc3 = vmovq_n_f16(0.0f);
  float16x8_t vacc4 = vmovq_n_f16(0.0f);
  for (; batch >= 80 * sizeof(__fp16); batch -= 80 * sizeof(__fp16)) {
    const float16x8_t vi0 = vld1q_f16(i); i += 8;
    const float16x8_t vi1 = vld1q_f16(i); i += 8;
    const float16x8_t vi2 = vld1q_f16(i); i += 8;
    const float16x8_t vi3 = vld1q_f16(i); i += 8;
    const float16x8_t vi4 = vld1q_f16(i); i += 8;
    const float16x8_t vi5 = vld1q_f16(i); i += 8;
    const float16x8_t vi6 = vld1q_f16(i); i += 8;
    const float16x8_t vi7 = vld1q_f16(i); i += 8;
    const float16x8_t vi8 = vld1q_f16(i); i += 8;
    const float16x8_t vi9 = vld1q_f16(i); i += 8;

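    // Subtract the precomputed maximum so that x <= 0 and exp(x) lies in (0, 1], keeping the
    // computation numerically stable in half precision.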
    const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
    const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
    const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
    const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
    const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
    const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
    const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
    const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
    const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
    const float16x8_t vx9 = vsubq_f16(vi9, vi_max);

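    // n = round(x * log2e), via the magic-bias trick: adding the bias fixes the binary point
    // so the rounded integer lands in the low mantissa bits of vn.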
    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
    float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
    float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
    float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);

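    // Build the scale s = 2**n by shifting vn left by the 10 fp16 mantissa bits, moving n
    // (plus the exponent bias baked into the magic constant) into the exponent field.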
    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
    const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
    const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
    const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));

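    // Subtract the magic bias to recover n as a float for the range reduction.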
    vn0 = vsubq_f16(vn0, vmagic_bias);
    vn1 = vsubq_f16(vn1, vmagic_bias);
    vn2 = vsubq_f16(vn2, vmagic_bias);
    vn3 = vsubq_f16(vn3, vmagic_bias);
    vn4 = vsubq_f16(vn4, vmagic_bias);
    vn5 = vsubq_f16(vn5, vmagic_bias);
    vn6 = vsubq_f16(vn6, vmagic_bias);
    vn7 = vsubq_f16(vn7, vmagic_bias);
    vn8 = vsubq_f16(vn8, vmagic_bias);
    vn9 = vsubq_f16(vn9, vmagic_bias);

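    // Cody-Waite range reduction: t = x - n * ln2, with ln2 split into high and low parts so
    // the two fused multiply-adds preserve more precision than a single subtraction would.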
    float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
    float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
    float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
    float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
    float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
    float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
    float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
    float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
    float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
    float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);

    vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
    vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
    vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
    vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
    vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
    vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
    vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
    vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
    vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
    vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);

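    // Degree-2 polynomial on the reduced range: p = c1 + c2 * t, so exp(t) ~ 1 + t * p.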
    const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
    const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
    const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
    const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
    const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
    const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
    const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
    const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
    const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
    const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);

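    // Fold the 2**n scale into t so the reconstruction below is a single fused multiply-add.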
    vt0 = vmulq_f16(vt0, vs0);
    vt1 = vmulq_f16(vt1, vs1);
    vt2 = vmulq_f16(vt2, vs2);
    vt3 = vmulq_f16(vt3, vs3);
    vt4 = vmulq_f16(vt4, vs4);
    vt5 = vmulq_f16(vt5, vs5);
    vt6 = vmulq_f16(vt6, vs6);
    vt7 = vmulq_f16(vt7, vs7);
    vt8 = vmulq_f16(vt8, vs8);
    vt9 = vmulq_f16(vt9, vs9);

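    // Reconstruct f = s + (t * s) * p = s * (1 + t * p); interleaved with it, flag lanes
    // whose x is below the cutoff where exp(x) underflows to an fp16 denormal.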
    float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
    const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
    float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
    const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
    float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
    const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
    float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
    const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
    float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
    const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
    float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
    const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
    float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
    const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
    float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
    const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
    float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
    const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
    float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
    const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);

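    // Force f to zero on the flagged lanes.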
    vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
    vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
    vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
    vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
    vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
    vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
    vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
    vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
    vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
    vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));

    vst1q_f16(o, vf0); o += 8;
    vst1q_f16(o, vf1); o += 8;
    vst1q_f16(o, vf2); o += 8;
    vst1q_f16(o, vf3); o += 8;
    vst1q_f16(o, vf4); o += 8;
    vst1q_f16(o, vf5); o += 8;
    vst1q_f16(o, vf6); o += 8;
    vst1q_f16(o, vf7); o += 8;
    vst1q_f16(o, vf8); o += 8;
    vst1q_f16(o, vf9); o += 8;

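    // Spread the ten result vectors across the five accumulators.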
    vacc0 = vaddq_f16(vacc0, vf0);
    vacc1 = vaddq_f16(vacc1, vf1);
    vacc2 = vaddq_f16(vacc2, vf2);
    vacc3 = vaddq_f16(vacc3, vf3);
    vacc4 = vaddq_f16(vacc4, vf4);
    vacc0 = vaddq_f16(vacc0, vf5);
    vacc1 = vaddq_f16(vacc1, vf6);
    vacc2 = vaddq_f16(vacc2, vf7);
    vacc3 = vaddq_f16(vacc3, vf8);
    vacc4 = vaddq_f16(vacc4, vf9);
  }
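  // Reduce the five partial sums to a single vector accumulator.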
  vacc0 = vaddq_f16(vacc0, vacc1);
  vacc2 = vaddq_f16(vacc2, vacc3);
  vacc0 = vaddq_f16(vacc0, vacc2);
  vacc0 = vaddq_f16(vacc0, vacc4);

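  // Carry the reduced sum into a single accumulator and process the remaining full
  // 8-element blocks with the same algorithm.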
  float16x8_t vacc = vacc0;
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vi = vld1q_f16(i); i += 8;

    const float16x8_t vx = vsubq_f16(vi, vi_max);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);

    float16x8_t vf = vfmaq_f16(vs, vp, vt);
    const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));

    vst1q_f16(o, vf); o += 8;

    vacc = vaddq_f16(vacc, vf);
  }
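  // Fold the 8-lane accumulator into 4 lanes before handling the tail.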
  float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
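  // Handle a tail of 1-7 elements. The full-width load may read past the end of the input
  // (permitted via the XNN_OOB_READS annotation); the 64-bit shifts below keep only the
  // just-stored lanes in the addend, so out-of-range lanes never enter the sum.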
  if (batch != 0) {
    assert(batch >= 1 * sizeof(__fp16));
    assert(batch <= 7 * sizeof(__fp16));
    const float16x8_t vi = vld1q_f16(i);

    const float16x8_t vx = vsubq_f16(vi, vi_max);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);

    float16x8_t vf = vfmaq_f16(vs, vp, vt);
    const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));

    float16x4_t vf_lo = vget_low_f16(vf);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 4;
      vacc_lo = vadd_f16(vacc_lo, vf_lo);
      vf_lo = vget_high_f16(vf);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
      vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
      vf_lo = vext_f16(vf_lo, vf_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vf_lo, 0);
      vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
    }
  }
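  // Final horizontal reduction: the pairwise add leaves [s01, s23, s01, s23], and summing
  // lanes 0 and 1 yields the scalar total.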
  vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
  *((__fp16*) sum) = vget_lane_f16(vacc_lo, 0) + vget_lane_f16(vacc_lo, 1);
}

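// A minimal usage sketch (illustration only, not part of the generated kernel): softmax over
// a half-precision buffer via this ukernel. The parameter-init helper named below is an
// assumption; XNNPACK fills xnn_f16_expminus_params with a matching init routine, and the
// maximum would normally come from an f16 rmax ukernel rather than the scalar loop here.
#if 0  // sketch under stated assumptions; not compiled
static void softmax_f16_sketch(size_t n, const __fp16* x, __fp16* y) {
  // Running maximum, computed with a plain scalar loop for clarity.
  __fp16 x_max = x[0];
  for (size_t i = 1; i < n; i++) {
    if (x[i] > x_max) x_max = x[i];
  }

  union xnn_f16_expminus_params params;
  xnn_init_f16_expminus_neonfp16arith_rr2_p2_params(&params);  // hypothetical helper name

  // batch is passed in bytes; the kernel writes exp(x[i] - x_max) to y and the sum to vsum.
  __fp16 vsum;
  xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80_acc5(
      n * sizeof(__fp16), x, &x_max, y, &vsum, &params);

  // Normalize in single precision to limit fp16 rounding in the reciprocal.
  const float inv_sum = 1.0f / (float) vsum;
  for (size_t i = 0; i < n; i++) {
    y[i] = (__fp16) ((float) y[i] * inv_sum);
  }
}
#endif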