// Auto-generated file. Do not edit!
//   Template: src/s16-window/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/window.h>


void xnn_s16_window_ukernel__neon_x24(
    size_t rows,
    size_t batch_size,
    const int16_t* input,
    const int16_t* weights,
    int16_t* output,
    uint32_t shift)
{
  assert(rows != 0);
  assert(batch_size != 0);
  assert(input != NULL);
  assert(weights != NULL);
  assert(output != NULL);
  assert(shift < 32);

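  // NEON has no variable right-shift intrinsic: vshlq_s32 shifts left by a
  // signed per-lane amount, and a negative amount performs an arithmetic
  // shift right. Negating the shift once here lets the loops below reuse it.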
  const int32x4_t vshift = vdupq_n_s32(-(int32_t)shift);  // negative to shift right.

  do {
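    // Every row is multiplied by the same window: w restarts at the first
    // weight on each iteration, and n counts the remaining batch in bytes.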
    const int16_t* w = weights;
    size_t n = batch_size * sizeof(int16_t);
    for (; n >= 24 * sizeof(int16_t); n -= 24 * sizeof(int16_t)) {
      const int16x8_t vi0 = vld1q_s16(input); input += 8;
      const int16x8_t vi1 = vld1q_s16(input); input += 8;
      const int16x8_t vi2 = vld1q_s16(input); input += 8;

      const int16x8_t vw0 = vld1q_s16(w); w += 8;
      const int16x8_t vw1 = vld1q_s16(w); w += 8;
      const int16x8_t vw2 = vld1q_s16(w); w += 8;

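      // vmull_s16 widens each int16 x int16 product to a full int32 lane, so
      // the element-wise input * weight products cannot overflow before the
      // scaling shift is applied.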
      int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
      int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
      int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
      int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
      int32x4_t vacc2_lo = vmull_s16(vget_low_s16(vi2), vget_low_s16(vw2));
      int32x4_t vacc2_hi = vmull_s16(vget_high_s16(vi2), vget_high_s16(vw2));

      vacc0_lo = vshlq_s32(vacc0_lo, vshift);
      vacc0_hi = vshlq_s32(vacc0_hi, vshift);
      vacc1_lo = vshlq_s32(vacc1_lo, vshift);
      vacc1_hi = vshlq_s32(vacc1_hi, vshift);
      vacc2_lo = vshlq_s32(vacc2_lo, vshift);
      vacc2_hi = vshlq_s32(vacc2_hi, vshift);

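      // vqmovn_s32 narrows the scaled 32-bit values back to int16 with
      // saturation, clamping anything outside [INT16_MIN, INT16_MAX].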
      const int16x8_t vout0 = vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi));
      const int16x8_t vout1 = vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi));
      const int16x8_t vout2 = vcombine_s16(vqmovn_s32(vacc2_lo), vqmovn_s32(vacc2_hi));

      vst1q_s16(output, vout0); output += 8;
      vst1q_s16(output, vout1); output += 8;
      vst1q_s16(output, vout2); output += 8;
    }

    // Remainder of full vectors
    for (; n >= 8 * sizeof(int16_t); n -= 8 * sizeof(int16_t)) {
      const int16x8_t vi = vld1q_s16(input); input += 8;
      const int16x8_t vw = vld1q_s16(w); w += 8;
      int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
      int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
      vacc_lo = vshlq_s32(vacc_lo, vshift);
      vacc_hi = vshlq_s32(vacc_hi, vshift);
      const int16x8_t vout = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
      vst1q_s16(output, vout); output += 8;
    }

    assert(n % 2 == 0);
    // Remainder of 1 to 7 elements
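    // The tail loads a full 8-element vector even though only n bytes remain,
    // then advances input by n; the extra lanes are discarded by the partial
    // stores below. This presumably relies on the caller providing enough
    // readable padding after the input buffer for the over-read to be safe.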
    if XNN_UNLIKELY(n != 0) {
      const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + n);
      const int16x8_t vw = vld1q_s16(w);
      int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
      vacc = vshlq_s32(vacc, vshift);
      int16x4_t vout = vqmovn_s32(vacc);
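      // Store the 1-7 remaining elements by testing the bits of the byte
      // count n: first a 4-element store (refilling vout from the high
      // halves), then a 2-element store via a u32 lane, then a final
      // 1-element store.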
      if (n & (4 * sizeof(int16_t))) {
        vst1_s16(output, vout); output += 4;
        vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
        vacc = vshlq_s32(vacc, vshift);
        vout = vqmovn_s32(vacc);
      }
      if (n & (2 * sizeof(int16_t))) {
        vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
        vout = vext_s16(vout, vout, 2);
      }
      if (n & (1 * sizeof(int16_t))) {
        vst1_lane_s16(output, vout, 0); output += 1;
      }
    }

  } while (--rows != 0);
}
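
// Illustrative usage sketch (not part of the generated kernel; the Q15
// interpretation of the weights and the shift value are assumptions about a
// typical caller, not something this file specifies):
//
//   int16_t input[2][256];   // two rows of samples
//   int16_t window[256];     // e.g. a Hann window in Q15
//   int16_t output[2][256];
//   // With Q15 weights, shift = 15 rescales each 32-bit product to int16:
//   xnn_s16_window_ukernel__neon_x24(
//       2, 256, &input[0][0], window, &output[0][0], 15);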