// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/wasmsimd-rr2-lut64-p2-div.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


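// Table of 64 float values of 2**(-k/64), k = 0..63, used to reconstruct exp(-z)
// from a 6-bit index.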
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

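  // Each constant is loaded with a 64-bit splat, which assumes the params struct
  // stores every scalar constant as a duplicated pair of 32-bit values, so the
  // splat replicates it across all four lanes.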
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
  const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);

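  // General structure of the algorithm:
  //
  //   f := exp(-z) / (1 + exp(-z)) where z := abs(x)
  //
  // which equals sigmoid(x) for non-positive x; for positive x the result is 1 - f.
  // The main loop processes 4 floats per iteration.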
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

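    // Compute reduced argument n := round(z * -log2(e), 6), i.e. rounded to the
    // nearest multiple of 1/64, by adding a large magic bias: the rounded fixed-point
    // result lands in the low mantissa bits of vn. Shifting those bits left by 17
    // moves the integer part of n into the floating-point exponent field, producing e.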
    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t ve = wasm_i32x4_shl(vn, 17);

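    // Use the low 6 bits of n (scaled by 4 to a byte offset) to gather the four
    // table entries, each holding 2 raised to the fractional part of n, with
    // scalar loads.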
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

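    // Combine the table value l with the exponent bits e via integer addition on the
    // raw float bits, reconstructing the scale s := 2**n. Then subtract the magic
    // bias from vn to recover n as a regular floating-point value.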
    const v128_t vs = wasm_i32x4_add(vl, ve);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

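    // Compute reduced argument t := z + n * log(2). log(2) is split into high and
    // low halves (Cody-Waite style) to keep the large cancellation accurate; the
    // result satisfies |t| <= log(2)/128.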
    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

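    // Degree-2 polynomial approximation of exp(-t) on [-log(2)/128, log(2)/128]:
    //   exp(-t) ~= 1 - t + c2 * t**2 = 1 - (t - c2 * t**2) = 1 - p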
    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

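    // Reconstruct exp(-z) = s * (1 - p) and the sigmoid denominator d := 1 + exp(-z).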
    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
    const v128_t vd = wasm_f32x4_add(vy, vone);

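    // f := exp(-z) / (1 + exp(-z)). Flush f to +0 where z exceeds the denormal
    // cutoff (there exp(-z) is subnormal), then select 1 - f for non-negative inputs
    // using the broadcast sign bit of x as the blend mask.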
    v128_t vf = wasm_f32x4_div(vy, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
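  // Handle the final 1-3 elements with the same sequence as the main loop. The full
  // 16-byte load may read past the end of the input buffer; the XNN_OOB_READS
  // annotation on the function marks these out-of-bounds reads as intentional.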
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t ve = wasm_i32x4_shl(vn, 17);

    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    const v128_t vs = wasm_i32x4_add(vl, ve);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
    const v128_t vd = wasm_f32x4_add(vy, vone);

    v128_t vf = wasm_f32x4_div(vy, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

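    // Store the remaining results: two lanes at a time, shifting the upper half of
    // the vector down in between, then a final single lane if one element is left.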
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}