// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>

void xnn_qu8_vlrelu_ukernel__neon_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

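  // Broadcast the quantization parameters across all vector lanes.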
  const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
  const int16x8_t vpositive_multiplier = vld1q_dup_s16(&params->neon.positive_multiplier);
  const int16x8_t vnegative_multiplier = vld1q_dup_s16(&params->neon.negative_multiplier);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
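  // Main loop: process 16 elements per iteration.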
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const uint8x16_t vx0 = vld1q_u8(x); x += 16;

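    // Widen each byte to 16 bits and compute (input_zero_point - x).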
    int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
    int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));

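    // A negative accumulator means the input is above the zero point, i.e. on the positive side of the LeakyReLU.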
    const uint16x8_t vmask0 = vcltq_s16(vacc0, vmovq_n_s16(0));
    const uint16x8_t vmask1 = vcltq_s16(vacc1, vmovq_n_s16(0));

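    // Shift left by 7 bits to pre-scale the accumulators for the Q15 multiplication below.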
    vacc0 = vshlq_n_s16(vacc0, 7);
    vacc1 = vshlq_n_s16(vacc1, 7);

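    // Select the positive- or negative-slope multiplier per element.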
    const int16x8_t vmultiplier0 = vbslq_s16(vmask0, vpositive_multiplier, vnegative_multiplier);
    const int16x8_t vmultiplier1 = vbslq_s16(vmask1, vpositive_multiplier, vnegative_multiplier);

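    // Apply the slope with a saturating rounding doubling high-half multiply.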
    vacc0 = vqrdmulhq_s16(vacc0, vmultiplier0);
    vacc1 = vqrdmulhq_s16(vacc1, vmultiplier1);

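    // Add the output zero point with saturation.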
    vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
    vacc1 = vqaddq_s16(vacc1, voutput_zero_point);

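    // Narrow back to uint8 with unsigned saturation and store 16 elements.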
    const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));

    vst1q_u8(y, vy0); y += 16;
  }
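  // Process any remaining full group of 8 elements.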
  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
    const uint8x8_t vx = vld1_u8(x); x += 8;
    int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
    const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
    vacc = vshlq_n_s16(vacc, 7);
    const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
    vacc = vqrdmulhq_s16(vacc, vmultiplier);
    vacc = vqaddq_s16(vacc, voutput_zero_point);
    const uint8x8_t vy = vqmovun_s16(vacc);
    vst1_u8(y, vy); y += 8;
  }
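  // Handle the final 1-7 elements; the full 8-byte load may read past n, which the kernel declares via XNN_OOB_READS.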
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 7 * sizeof(uint8_t));

    const uint8x8_t vx = vld1_u8(x);
    int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
    const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
    vacc = vshlq_n_s16(vacc, 7);
    const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
    vacc = vqrdmulhq_s16(vacc, vmultiplier);
    vacc = vqaddq_s16(vacc, voutput_zero_point);
    uint8x8_t vy = vqmovun_s16(vacc);

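    // Store 4, 2, and/or 1 lanes according to the remaining length, rotating the vector between stores.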
    if (n & (4 * sizeof(uint8_t))) {
      vst1_lane_u32((void*) y, vreinterpret_u32_u8(vy), 0); y += 4;
      vy = vext_u8(vy, vy, 4);
    }
    if (n & (2 * sizeof(uint8_t))) {
      vst1_lane_u16((void*) y, vreinterpret_u16_u8(vy), 0); y += 2;
      vy = vext_u8(vy, vy, 2);
    }
    if (n & (1 * sizeof(uint8_t))) {
      vst1_lane_u8(y, vy, 0);
    }
  }
}