// Auto-generated file. Do not edit!
//   Template: src/f32-prelu/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

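// PReLU microkernel: for each element x with per-channel weight w, the output
// is x when x is non-negative and x*w otherwise. This variant processes the
// input as a 2-row by 4-channel tile per iteration using NEON. Note that
// `channels` is expressed in bytes, not elements (see the asserts below).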
void xnn_f32_prelu_ukernel__neon_2x4(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

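  // Set up input/output pointers for the two rows handled per outer iteration.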
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);

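  // Each iteration already advances the pointers by `channels` bytes, so
  // stepping two full strides minus `channels` lands on the next row pair.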
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  do {
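    // If only one row remains, alias row 1 onto row 0: the duplicated loads
    // and stores hit the same addresses, so the extra work is harmless.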
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }

    const float* w = weights;
    size_t c = channels;
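    // Main loop: process 4 channels (16 bytes) per row per iteration.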
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;

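      // Compute x*w unconditionally, then build a per-lane mask of negative
      // inputs: a signed integer compare of the float bits against zero tests
      // the sign bit. vbslq_f32 then selects x*w for negative lanes and the
      // original x for the rest.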
      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));

      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
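    // Remainder: 1-3 channels left. The full 16-byte loads may read past the
    // end of the row; this is permitted by the XNN_OOB_READS annotation, and
    // only the valid lanes are stored below.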
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      const float32x4_t vi1x0123 = vld1q_f32(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));

      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);

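      // Store only the valid lanes: two floats if c covers 8 bytes (then move
      // on to the high half), and one more float if 4 bytes remain.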
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
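    // Advance all pointers to the next pair of rows.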
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
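    // doz() (from xnnpack/math.h) is a saturating "difference or zero", so
    // rows cannot underflow when a single trailing row was processed.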
    rows = doz(rows, 2);
  } while (rows != 0);
}