// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/wasmsimd-arm.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
17
xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x16(size_t n,const int8_t * x,int8_t * y,const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS (1)])18 void xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x16(
19 size_t n,
20 const int8_t* x,
21 int8_t* y,
22 const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
23 {
24 assert(n != 0);
25 assert(n % sizeof(int8_t) == 0);
26 assert(x != NULL);
27 assert(y != NULL);
28
29 const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
30 const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
31 const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
32 const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
33 for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
34 v128_t vx0 = wasm_v128_load(x);
35 x += 16;
36
37 v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx0));
38 v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx0));
39 v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
40 v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
41
42 vacc0 = wasm_i16x8_shl(vacc0, 7);
43 vmultiplier0 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
44 vacc1 = wasm_i16x8_shl(vacc1, 7);
45 vmultiplier1 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
46
47 vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
48 vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
49
50 vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
51 vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
52
53 const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
54
55 wasm_v128_store(y, vy0);
56 y += 16;
57 }
58 for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
59 const v128_t vx = wasm_i16x8_load8x8(x);
60 v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
61 v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
62 vacc = wasm_i16x8_shl(vacc, 7);
63 vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
64 vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
65 vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
66 x += 8;
67
68 const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
69 wasm_v128_store64_lane(y, vy, 0);
70 y += 8;
71 }
72 if XNN_UNLIKELY(n != 0) {
73 assert(n >= 1 * sizeof(int8_t));
74 assert(n <= 7 * sizeof(int8_t));
75
76 const v128_t vx = wasm_i16x8_load8x8(x);
77 v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
78 v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
79 vacc = wasm_i16x8_shl(vacc, 7);
80 vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
81 vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
82 vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
83
84 v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
85 if (n & (4 * sizeof(int8_t))) {
86 wasm_v128_store32_lane(y, vy, 0);
87 vy = wasm_u64x2_shr(vy, 32);
88 y += 4;
89 }
90 if (n & (2 * sizeof(int8_t))) {
91 wasm_v128_store16_lane(y, vy, 0);
92 vy = wasm_u32x4_shr(vy, 16);
93 y += 2;
94 }
95 if (n & (1 * sizeof(int8_t))) {
96 wasm_v128_store8_lane(y, vy, 0);
97 }
98 }
99 }
100