// Auto-generated file. Do not edit!
//   Template: src/f16-vlrelu/neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f16_vlrelu_ukernel__neonfp16arith_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(__fp16) == 0);

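  // Load the leaky ReLU slope as raw half-precision bits and broadcast it to all 8 lanes.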
  const float16x8_t vslope = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.slope));
  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
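  // Main loop: process 16 half-precision elements per iteration as two vectors of 8.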
  for (; batch >= 16 * sizeof(__fp16); batch -= 16 * sizeof(__fp16)) {
    const float16x8_t vx01234567 = vld1q_f16(i); i += 8;
    const float16x8_t vx89ABCDEF = vld1q_f16(i); i += 8;

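    // Compute x * slope unconditionally and build a per-lane mask of negative inputs.
    // Reinterpreting the bits as signed 16-bit integers and comparing against zero tests
    // the sign bit directly, so negative lanes (including -0.0) are flagged without a
    // floating-point comparison.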
    float16x8_t vacc01234567 = vmulq_f16(vx01234567, vslope);
    const uint16x8_t vmask01234567 = vcltq_s16(vreinterpretq_s16_f16(vx01234567), vmovq_n_s16(0));
    float16x8_t vacc89ABCDEF = vmulq_f16(vx89ABCDEF, vslope);
    const uint16x8_t vmask89ABCDEF = vcltq_s16(vreinterpretq_s16_f16(vx89ABCDEF), vmovq_n_s16(0));

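    // Bit-select: keep x * slope where the input was negative, the input itself otherwise.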
    vacc01234567 = vbslq_f16(vmask01234567, vacc01234567, vx01234567);
    vacc89ABCDEF = vbslq_f16(vmask89ABCDEF, vacc89ABCDEF, vx89ABCDEF);

    vst1q_f16(o, vacc01234567); o += 8;
    vst1q_f16(o, vacc89ABCDEF); o += 8;
  }
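  // Handle any remaining full vector of 8 elements.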
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vx = vld1q_f16(i); i += 8;
    float16x8_t vacc = vmulq_f16(vx, vslope);
    const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
    vacc = vbslq_f16(vmask, vacc, vx);
    vst1q_f16(o, vacc); o += 8;
  }
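  // Remainder of 1-7 elements: load a full vector anyway (out-of-bounds reads are
  // permitted by the XNN_OOB_READS annotation), then store only the valid lanes.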
  if XNN_UNLIKELY(batch != 0) {
    const float16x8_t vx = vld1q_f16(i);
    float16x8_t vacc = vmulq_f16(vx, vslope);
    const uint16x8_t vmask = vcltq_s16(vreinterpretq_s16_f16(vx), vmovq_n_s16(0));
    vacc = vbslq_f16(vmask, vacc, vx);

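    // Store the valid lanes in decreasing power-of-two chunks: 4, then 2, then 1.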
    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vacc_lo); o += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vacc_lo, 0);
    }
  }
}