// Auto-generated file. Do not edit!
//   Template: src/s16-window/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/window.h>


void xnn_s16_window_ukernel__neon_x16(
    size_t rows,
    size_t batch_size,
    const int16_t* input,
    const int16_t* weights,
    int16_t* output,
    uint32_t shift)
{
  assert(rows != 0);
  assert(batch_size != 0);
  assert(input != NULL);
  assert(weights != NULL);
  assert(output != NULL);
  assert(shift < 32);

  const int32x4_t vshift = vdupq_n_s32(-(int32_t)shift);  // negative to shift right.

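  // For each row, multiply the input elements by the window weights, shift the
  // 32-bit products right arithmetically by 'shift', and saturate to int16.
  // The same weights are reused for every row.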
  do {
    const int16_t* w = weights;
    size_t n = batch_size * sizeof(int16_t);
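    // Main loop: process 16 elements per iteration as two 8-lane vectors.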
    for (; n >= 16 * sizeof(int16_t); n -= 16 * sizeof(int16_t)) {
      const int16x8_t vi0 = vld1q_s16(input); input += 8;
      const int16x8_t vi1 = vld1q_s16(input); input += 8;

      const int16x8_t vw0 = vld1q_s16(w); w += 8;
      const int16x8_t vw1 = vld1q_s16(w); w += 8;

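      // Widening multiply: int16 x int16 -> int32 products.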
      int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
      int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
      int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
      int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));

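      // Arithmetic right shift by 'shift' (vshift holds the negated count).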
      vacc0_lo = vshlq_s32(vacc0_lo, vshift);
      vacc0_hi = vshlq_s32(vacc0_hi, vshift);
      vacc1_lo = vshlq_s32(vacc1_lo, vshift);
      vacc1_hi = vshlq_s32(vacc1_hi, vshift);

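      // Saturate the shifted products to int16 and narrow back to 8-lane vectors.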
      const int16x8_t vout0 = vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi));
      const int16x8_t vout1 = vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi));

      vst1q_s16(output, vout0); output += 8;
      vst1q_s16(output, vout1); output += 8;
    }

    // Remainder of full vectors
    for (; n >= 8 * sizeof(int16_t); n -= 8 * sizeof(int16_t)) {
      const int16x8_t vi = vld1q_s16(input); input += 8;
      const int16x8_t vw = vld1q_s16(w); w += 8;
      int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
      int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
      vacc_lo = vshlq_s32(vacc_lo, vshift);
      vacc_hi = vshlq_s32(vacc_hi, vshift);
      const int16x8_t vout = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
      vst1q_s16(output, vout); output += 8;
    }

    assert(n % 2 == 0);
    // Remainder of 1 to 7 batch_size
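    // The loads below read a full 8-element vector; only the remaining
    // n / sizeof(int16_t) elements are stored, in chunks of 4, 2, and 1.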
    if XNN_UNLIKELY(n != 0) {
      const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + n);
      const int16x8_t vw = vld1q_s16(w);
      int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
      vacc = vshlq_s32(vacc, vshift);
      int16x4_t vout = vqmovn_s32(vacc);
      if (n & (4 * sizeof(int16_t))) {
        vst1_s16(output, vout); output += 4;
        vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
        vacc = vshlq_s32(vacc, vshift);
        vout = vqmovn_s32(vacc);
      }
      if (n & (2 * sizeof(int16_t))) {
        vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
        vout = vext_s16(vout, vout, 2);
      }
      if (n & (1 * sizeof(int16_t))) {
        vst1_lane_s16(output, vout, 0); output += 1;
      }
    }

  } while (--rows != 0);
}