// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

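// Algorithm sketch (inferred from the code below; the exact parameter values
// live in xnn_f32_sigmoid_params and are not visible in this file):
// sigmoid(x) = 1 / (1 + exp(-x)) is evaluated on z = |x| as
//   f = exp(-z) / (1 + exp(-z)) = sigmoid(-z),
// where exp(-z) is computed with a 64-entry table of 2**(-k/64) ("lut64"),
// a two-step Cody-Waite reduction by ln(2) ("rr2"), and a degree-2
// polynomial ("p2"); the final reconstruction uses division ("div").
// For non-negative x the result is recovered as sigmoid(x) = 1 - f.
// The "x8" variant processes 8 elements per main-loop iteration.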
void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

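  // Each parameter appears to be stored in the params struct as a pair of
  // identical 32-bit values (the usual XNNPACK wasmsimd layout), so a single
  // 64-bit load-and-splat broadcasts it to all four f32 lanes.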
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
  const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);

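  // Main loop: two SIMD vectors (8 elements) per iteration.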
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx0123 = wasm_v128_load(x);
    const v128_t vx4567 = wasm_v128_load(x + 4);
    x += 8;

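    // Work on z = |x|; the sign of x is reapplied at the end.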
    const v128_t vz0123 = wasm_f32x4_abs(vx0123);
    const v128_t vz4567 = wasm_f32x4_abs(vx4567);

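    // Judging by the parameter names, n := z * (-log2(e)) rounded to the
    // nearest multiple of 1/64 via the magic-bias trick: adding the large
    // bias constant quantizes the product and leaves the scaled integer
    // 64*n in the low mantissa bits of vn.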
    v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vminus_log2e));
    v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vminus_log2e));

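    // Shifting vn's integer bits left by 17 moves the bits of 64*n above
    // the 6 table-index bits (6 + 17 = 23) into the f32 exponent field, so
    // ve, applied later as an integer add, adjusts the result's exponent by
    // the integer part of n.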
    const v128_t ve0123 = wasm_i32x4_shl(vn0123, 17);
    const v128_t ve4567 = wasm_i32x4_shl(vn4567, 17);

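    // The low 6 bits of 64*n select one of the 64 table entries; << 2
    // converts the entry index into a byte offset (4 bytes per float).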
    const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
    const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);

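    // WAsm SIMD has no gather instruction: extract two 32-bit offsets at a
    // time as one 64-bit lane and do scalar loads from the 2**(-k/64) table.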
    const uint64_t vidx01 = wasm_i64x2_extract_lane(vidx0123, 0);
    const uint64_t vidx23 = wasm_i64x2_extract_lane(vidx0123, 1);
    const float vl0   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx01));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx01 >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx23));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx23 >> 32)));
    const v128_t vl0123 = wasm_f32x4_make(vl0, vl1, vl2, vl3);
    const uint64_t vidx45 = wasm_i64x2_extract_lane(vidx4567, 0);
    const uint64_t vidx67 = wasm_i64x2_extract_lane(vidx4567, 1);
    const float vl4   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx45));
    const float vl5 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx45 >> 32)));
    const float vl6 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx67));
    const float vl7 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx67 >> 32)));
    const v128_t vl4567 = wasm_f32x4_make(vl4, vl5, vl6, vl7);

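    // Reconstruct s := 2**n ~= 2**round(n) * 2**(-k/64): the integer add
    // merges the exponent bits in ve into the table value's f32 encoding.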
    const v128_t vs0123 = wasm_i32x4_add(vl0123, ve0123);
    const v128_t vs4567 = wasm_i32x4_add(vl4567, ve4567);

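    // Subtract the magic bias to recover n as an ordinary float.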
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);

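    // t := z + n*ln(2), with ln(2) split into high and low parts (two-step
    // Cody-Waite reduction) for extra precision; exp(-z) = s * exp(-t).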
    v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vln2_hi));
    v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vln2_hi));

    vt0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vn0123, vln2_lo));
    vt4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vn4567, vln2_lo));

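    // Degree-2 polynomial: p := t - c2*t**2, so that exp(-t) ~= 1 - p.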
    v128_t vp0123 = wasm_f32x4_mul(vt0123, vc2);
    v128_t vp4567 = wasm_f32x4_mul(vt4567, vc2);

    vp0123 = wasm_f32x4_sub(vt0123, wasm_f32x4_mul(vp0123, vt0123));
    vp4567 = wasm_f32x4_sub(vt4567, wasm_f32x4_mul(vp4567, vt4567));

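    // y := s * (1 - p) ~= exp(-z).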
    const v128_t vy0123 = wasm_f32x4_sub(vs0123, wasm_f32x4_mul(vs0123, vp0123));
    const v128_t vy4567 = wasm_f32x4_sub(vs4567, wasm_f32x4_mul(vs4567, vp4567));

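    // Denominator d := 1 + exp(-z).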
    const v128_t vd0123 = wasm_f32x4_add(vy0123, vone);
    const v128_t vd4567 = wasm_f32x4_add(vy4567, vone);

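    // f := exp(-z) / (1 + exp(-z)) = sigmoid(-z), in [0, 0.5].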
    v128_t vf0123 = wasm_f32x4_div(vy0123, vd0123);
    v128_t vf4567 = wasm_f32x4_div(vy4567, vd4567);

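    // For z above the cutoff exp(-z) is denormal and the quotient is not
    // reliable; andnot (a & ~b) flushes those lanes to 0, the sigmoid limit.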
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));

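    // Sign fix-up: vx >> 31 (arithmetic) is all-ones for negative x, where
    // f = sigmoid(-z) already equals sigmoid(x) and is kept; for
    // non-negative x the mask is zero and 1 - f = sigmoid(z) is selected.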
    vf0123 = wasm_v128_bitselect(vf0123, wasm_f32x4_sub(vone, vf0123), wasm_i32x4_shr(vx0123, 31));
    vf4567 = wasm_v128_bitselect(vf4567, wasm_f32x4_sub(vone, vf4567), wasm_i32x4_shr(vx4567, 31));

    wasm_v128_store(y, vf0123);
    wasm_v128_store(y + 4, vf4567);
    y += 8;
  }
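  // Remaining full vectors of 4 elements: same computation as above, for a
  // single vector at a time.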
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t ve = wasm_i32x4_shl(vn, 17);

    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    const v128_t vs = wasm_i32x4_add(vl, ve);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
    const v128_t vd = wasm_f32x4_add(vy, vone);

    v128_t vf = wasm_f32x4_div(vy, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
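  // Tail of 1 to 3 elements: XNN_OOB_READS permits loading a full vector
  // past the end of x; only the valid lanes are stored below.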
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t ve = wasm_i32x4_shl(vn, 17);

    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    const v128_t vs = wasm_i32x4_add(vl, ve);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
    const v128_t vd = wasm_f32x4_add(vy, vone);

    v128_t vf = wasm_f32x4_div(vy, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

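    // Store the valid lanes: two floats at once via a 64-bit lane extract,
    // then shuffle the upper half down; a final single float if n is odd.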
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}