// Auto-generated file. Do not edit!
//   Template: src/cs16-vsquareabs/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <arm_neon.h>

#include <xnnpack/vsquareabs.h>

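// Computes the squared magnitude of interleaved complex int16 inputs: for each
// (real, imaginary) pair, output = real*real + imaginary*imaginary, widened to
// 32 bits. As consumed by the loops below, `batch` counts complex elements.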
void xnn_cs16_vsquareabs_ukernel__neon_mlal_ld128_x16(
    size_t batch,
    const int16_t* input,
    uint32_t* output) {

  assert(batch != 0);
  assert(input != NULL);
  assert(output != NULL);

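  // Main loop: each iteration consumes 16 complex elements via four vld2_s16
  // de-interleaving loads (val[0] = real parts, val[1] = imaginary parts) and
  // squares-and-accumulates them into four int32x4_t accumulators using the
  // widening vmull_s16/vmlal_s16 instructions.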
  for (; batch >= 16; batch -= 16) {
    const int16x4x2_t vi0 = vld2_s16(input); input += 8;
    const int16x4x2_t vi1 = vld2_s16(input); input += 8;
    const int16x4x2_t vi2 = vld2_s16(input); input += 8;
    const int16x4x2_t vi3 = vld2_s16(input); input += 8;

    int32x4_t vacc0 = vmull_s16(vi0.val[0], vi0.val[0]);
    vacc0 = vmlal_s16(vacc0, vi0.val[1], vi0.val[1]);
    int32x4_t vacc1 = vmull_s16(vi1.val[0], vi1.val[0]);
    vacc1 = vmlal_s16(vacc1, vi1.val[1], vi1.val[1]);
    int32x4_t vacc2 = vmull_s16(vi2.val[0], vi2.val[0]);
    vacc2 = vmlal_s16(vacc2, vi2.val[1], vi2.val[1]);
    int32x4_t vacc3 = vmull_s16(vi3.val[0], vi3.val[0]);
    vacc3 = vmlal_s16(vacc3, vi3.val[1], vi3.val[1]);

    vst1q_u32(output, vreinterpretq_u32_s32(vacc0)); output += 4;
    vst1q_u32(output, vreinterpretq_u32_s32(vacc1)); output += 4;
    vst1q_u32(output, vreinterpretq_u32_s32(vacc2)); output += 4;
    vst1q_u32(output, vreinterpretq_u32_s32(vacc3)); output += 4;
  }

  // Remainder of full vectors
  for (; batch >= 4; batch -= 4) {
    const int16x4x2_t vi = vld2_s16(input); input += 8;

    int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]);

    vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]);

    vst1q_u32(output, vreinterpretq_u32_s32(vacc)); output += 4;
  }

  // Remainder of 1 to 3 elements
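  // A full vld2_s16 load of 4 complex elements is still issued even when only
  // 1 to 3 remain; just the valid results are stored, two at a time with
  // vst1_u32 and the final one with vst1_lane_u32.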
  if XNN_UNLIKELY(batch != 0) {
    const int16x4x2_t vi = vld2_s16(input);

    int32x4_t vacc = vmull_s16(vi.val[0], vi.val[0]);
    vacc = vmlal_s16(vacc, vi.val[1], vi.val[1]);

    uint32x2_t vacc_lo = vreinterpret_u32_s32(vget_low_s32(vacc));
    if (batch & 2) {
      vst1_u32(output, vacc_lo); output += 2;
      vacc_lo = vreinterpret_u32_s32(vget_high_s32(vacc));
    }
    if (batch & 1) {
      vst1_lane_u32(output, vacc_lo, 0);
    }
  }
}