// Auto-generated file. Do not edit!
//   Template: src/f32-prelu/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


void xnn_f32_prelu_ukernel__neon_4x4(
    size_t rows,
    size_t channels,
    const float* restrict input,
    size_t input_stride,
    const float* restrict weights,
    float* restrict output,
    size_t output_stride) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

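  // Set up pointers to 4 consecutive rows of the input and output.
  // Both strides are given in bytes, so the arithmetic goes through uintptr_t.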
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
  float* o2 = (float*) ((uintptr_t) o1 + output_stride);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
  float* o3 = (float*) ((uintptr_t) o2 + output_stride);

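  // After one pass over a row tile every pointer has advanced by `channels` bytes;
  // adding these increments moves it to the start of the row 4 strides further down.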
  const size_t input_increment = input_stride * 4 - channels;
  const size_t output_increment = output_stride * 4 - channels;

  do {
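    // When fewer than 4 rows remain, alias the surplus row pointers to the previous
    // row: the redundant work simply recomputes and rewrites data that is already valid.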
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(rows <= 2) {
      i2 = i1;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(rows < 4) {
      i3 = i2;
      o3 = o2;
    }

    const float* w = weights;
    size_t c = channels;
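    // Main loop: process 4 channels (one 128-bit vector) across all 4 rows per iteration.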
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4;

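      // Multiply each input by its per-channel slope and, in parallel, build a lane mask
      // from a signed integer compare of the raw float bits against zero, which flags
      // lanes whose sign bit is set (i.e. negative inputs).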
      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
      float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
      const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
      float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
      const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));

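      // Bitwise select: negative lanes keep slope * x, non-negative lanes pass x through.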
      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
      vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
      vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
      vst1q_f32(o2, vacc2x0123); o2 += 4;
      vst1q_f32(o3, vacc3x0123); o3 += 4;
    }
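    // Remainder: 1 to 3 channels are left. The loads still fetch a full vector and may
    // read past the last channel, which the XNN_OOB_READS annotation on this kernel
    // permits; the stores below only write the valid lanes.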
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      const float32x4_t vi1x0123 = vld1q_f32(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);
      const float32x4_t vi2x0123 = vld1q_f32(i2);
      i2 = (const float*) ((uintptr_t) i2 + c);
      const float32x4_t vi3x0123 = vld1q_f32(i3);
      i3 = (const float*) ((uintptr_t) i3 + c);

      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
      float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123);
      const uint32x4_t vm2x0123 = vcltq_s32(vreinterpretq_s32_f32(vi2x0123), vmovq_n_s32(0));
      float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);
      const uint32x4_t vm3x0123 = vcltq_s32(vreinterpretq_s32_f32(vi3x0123), vmovq_n_s32(0));

      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
      vacc2x0123 = vbslq_f32(vm2x0123, vacc2x0123, vi2x0123);
      vacc3x0123 = vbslq_f32(vm3x0123, vacc3x0123, vi3x0123);

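      // Store the valid lanes only: the low half first when 2 or 3 channels remain,
      // then a single lane when the remainder is odd (c counts bytes, not elements).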
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;
        vst1_f32(o2, vacc2x01); o2 += 2;
        vst1_f32(o3, vacc3x01); o3 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
        vst1_lane_f32(o2, vacc2x01, 0); o2 += 1;
        vst1_lane_f32(o3, vacc3x01, 0); o3 += 1;
      }
    }
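    // Advance all pointers to the next group of 4 rows; doz() ("difference or zero")
    // clamps the remaining row count at zero so the final partial group ends the loop.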
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    i2 = (const float*) ((uintptr_t) i2 + input_increment);
    o2 = (float*) ((uintptr_t) o2 + output_increment);
    i3 = (const float*) ((uintptr_t) i3 + input_increment);
    o3 = (float*) ((uintptr_t) o3 + output_increment);
    rows = doz(rows, 4);
  } while (rows != 0);
}