// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_lut64_p2_div_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.minus_log2e);
  const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.index_mask);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.ln2_lo);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.c2);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut64_p2.denorm_cutoff);

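  // General structure of the algorithm:
  //
  //           / exp(x) / (1 + exp(x)) if x <= 0
  //   f[x] :=
  //           \ 1 - f[-x] if x >= 0
  //
  // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
  // then replace the result with 1 - f[-z] if x >= 0.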
  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vz${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});

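      // Compute reduced argument n := round(-z / log(2), 6), i.e. round to the nearest multiple of 1/64.
      // The rounding is done by adding a large magic bias to the product z * (-log2(e)): the addition pushes the
      // result into a range where only 6 fractional bits survive, and the bias is subtracted back further below.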
      $for N in range(0, BATCH_TILE, 4):
        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz${ABC[N:N+4]}, vminus_log2e));

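      // Extract the exponent part of n: shifting the fixed-point bits of n left by 17 moves the integer part of n
      // into the floating-point exponent field (23 mantissa bits - 6 fractional index bits = 17).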
      $for N in range(0, BATCH_TILE, 4):
        const v128_t ve${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 17);

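      // Use the low 6 fractional bits of n as an index into the 64-entry table of 2**(-k/64) values; the extra
      // shift by 2 converts the index into a byte offset into the table of 4-byte floats.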
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vidx${ABC[N:N+4]} = wasm_i32x4_shl(wasm_v128_and(vn${ABC[N:N+4]}, vindex_mask), 2);

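      // Gather the four table entries per vector: extract the packed per-lane byte offsets and load the table
      // values with scalar loads (WAsm SIMD has no gather instruction).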
      $for N in range(0, BATCH_TILE, 4):
        const uint64_t vidx${ABC[N:N+2]} = wasm_i64x2_extract_lane(vidx${ABC[N:N+4]}, 0);
        const uint64_t vidx${ABC[N+2:N+4]} = wasm_i64x2_extract_lane(vidx${ABC[N:N+4]}, 1);
        const float vl${ABC[N]}   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx${ABC[N:N+2]}));
        const float vl${ABC[N+1]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx${ABC[N:N+2]} >> 32)));
        const float vl${ABC[N+2]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx${ABC[N+2:N+4]}));
        const float vl${ABC[N+3]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32)));
        const v128_t vl${ABC[N:N+4]} = wasm_f32x4_make(vl${ABC[N]}, vl${ABC[N+1]}, vl${ABC[N+2]}, vl${ABC[N+3]});

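      // Reconstruct s := 2**n: adding the shifted exponent bits to the table value as integers directly adjusts
      // the floating-point exponent of the table entry.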
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vs${ABC[N:N+4]} = wasm_i32x4_add(vl${ABC[N:N+4]}, ve${ABC[N:N+4]});

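      // Subtract the magic bias to recover the true value of the reduced argument n.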
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);

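      // Compute remainder t := z + n * log(2), using Cody-Waite range reduction: log(2) is split into a high and a
      // low part for extra accuracy. Since n ~ -z / log(2), t is a small value in [-log(2)/128, log(2)/128].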
      $for N in range(0, BATCH_TILE, 4):
        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(vz${ABC[N:N+4]}, wasm_f32x4_mul(vn${ABC[N:N+4]}, vln2_hi));

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_add(vt${ABC[N:N+4]}, wasm_f32x4_mul(vn${ABC[N:N+4]}, vln2_lo));

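      // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/128, log(2)/128]:
      //   exp(-t) ~ 1 - t + c2 * t**2 = 1 - (t - c2 * t**2) = 1 - p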
      $for N in range(0, BATCH_TILE, 4):
        v128_t vp${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vc2);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_sub(vt${ABC[N:N+4]}, wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}));

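      // Reconstruct the exponential: y := exp(-z) = s * (1 - p) = s - s * p.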
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_f32x4_sub(vs${ABC[N:N+4]}, wasm_f32x4_mul(vs${ABC[N:N+4]}, vp${ABC[N:N+4]}));

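      // Denominator of the sigmoid fraction: d := exp(-z) + 1.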
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vd${ABC[N:N+4]} = wasm_f32x4_add(vy${ABC[N:N+4]}, vone);

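      // Compute f[-z] := exp(-z) / (1 + exp(-z)) with a full division (the -div suffix in the kernel name refers
      // to this step).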
      $for N in range(0, BATCH_TILE, 4):
        v128_t vf${ABC[N:N+4]} = wasm_f32x4_div(vy${ABC[N:N+4]}, vd${ABC[N:N+4]});

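      // For z above the denormal cutoff, exp(-z) underflows and the result rounds to 0: the andnot clears f in
      // every lane where z > denorm_cutoff.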
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = wasm_v128_andnot(vf${ABC[N:N+4]}, wasm_f32x4_gt(vz${ABC[N:N+4]}, vdenorm_cutoff));

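      // Reconstruct the sign: keep f[-z] for negative x, use 1 - f[-z] for positive x. The arithmetic shift of x
      // by 31 broadcasts the sign bit into a full-lane select mask.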
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = wasm_v128_bitselect(vf${ABC[N:N+4]}, wasm_f32x4_sub(vone, vf${ABC[N:N+4]}), wasm_i32x4_shr(vx${ABC[N:N+4]}, 31));

      wasm_v128_store(y, vf${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vf${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
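  // Process full vectors of 4 elements, following the same steps outlined above.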
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t ve = wasm_i32x4_shl(vn, 17);

    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    const v128_t vs = wasm_i32x4_add(vl, ve);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
    const v128_t vd = wasm_f32x4_add(vy, vone);

    v128_t vf = wasm_f32x4_div(vy, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
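  // Handle the final 1-3 elements: compute a full vector (the function is annotated XNN_OOB_READS, so reading past
  // the end of x is permitted) and store only the valid lanes.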
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t ve = wasm_i32x4_shl(vn, 17);

    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    const v128_t vs = wasm_i32x4_add(vl, ve);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
    const v128_t vd = wasm_f32x4_add(vy, vone);

    v128_t vf = wasm_f32x4_div(vy, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}