// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["S8", "U8"]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert PIXEL_TILE == 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>


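// Select the signed or unsigned 8-bit NEON types and intrinsics matching DATATYPE.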
$XINT8_T = {"S8": "int8_t", "U8": "uint8_t"}[DATATYPE]
$XINT8X8_T = {"S8": "int8x8_t", "U8": "uint8x8_t"}[DATATYPE]
$VLD1_X8 = {"S8": "vld1_s8", "U8": "vld1_u8"}[DATATYPE]
$VST1_X8 = {"S8": "vst1_s8", "U8": "vst1_u8"}[DATATYPE]
$VST1_LANE_X8 = {"S8": "vst1_lane_s8", "U8": "vst1_lane_u8"}[DATATYPE]
$VEXT_X8 = {"S8": "vext_s8", "U8": "vext_u8"}[DATATYPE]
$VREINTERPRET_U32_X8 = {"S8": "vreinterpret_u32_s8", "U8": "vreinterpret_u32_u8"}[DATATYPE]
$VREINTERPRET_U16_X8 = {"S8": "vreinterpret_u16_s8", "U8": "vreinterpret_u16_u8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_ibilinear_ukernel__neon_c${CHANNEL_TILE}${"" if PIXEL_TILE == 1 else "x%d" % PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const ${XINT8_T}**restrict input,
    size_t input_offset,
    const int16_t*restrict weights,
    ${XINT8_T}*restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
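    // i0..i3 point at the four corners of the 2x2 interpolation footprint:
    // top-left, top-right, bottom-left, and bottom-right.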
    const ${XINT8_T}* i0 = (const ${XINT8_T}*) ((uintptr_t) input[0] + input_offset);
    const ${XINT8_T}* i1 = (const ${XINT8_T}*) ((uintptr_t) input[1] + input_offset);
    const ${XINT8_T}* i2 = (const ${XINT8_T}*) ((uintptr_t) input[2] + input_offset);
    const ${XINT8_T}* i3 = (const ${XINT8_T}*) ((uintptr_t) input[3] + input_offset);
    input += 4;

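    // Per-pixel weights: alphah (horizontal) and alphav (vertical) are 2^11
    // fixed-point values; together with the shifts below they scale the
    // accumulator by 2^22. On AArch64 alphah is broadcast to a 128-bit vector
    // so vmlal_high_s16 can consume its upper half directly.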
    #if XNN_ARCH_ARM64
      const int16x8_t valphah = vld1q_dup_s16(weights); weights += 1;
    #else
      const int16x4_t valphah = vld1_dup_s16(weights); weights += 1;
    #endif
    const int32x4_t valphav = vmovl_s16(vld1_dup_s16(weights)); weights += 1;

    size_t c = channels;
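    // Main loop over ${CHANNEL_TILE}-channel tiles; the arithmetic is the same
    // as in the 8-channel loop below.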
    $if CHANNEL_TILE > 8:
      for (; c >= ${CHANNEL_TILE} * sizeof(${XINT8_T}); c -= ${CHANNEL_TILE} * sizeof(${XINT8_T})) {
        $for C in range(0, CHANNEL_TILE, 8):
          const ${XINT8X8_T} vtl${ABC[C:C+8]} = ${VLD1_X8}(i0); i0 += 8;
          const ${XINT8X8_T} vtr${ABC[C:C+8]} = ${VLD1_X8}(i1); i1 += 8;
          const ${XINT8X8_T} vbl${ABC[C:C+8]} = ${VLD1_X8}(i2); i2 += 8;
          const ${XINT8X8_T} vbr${ABC[C:C+8]} = ${VLD1_X8}(i3); i3 += 8;

        $for C in range(0, CHANNEL_TILE, 8):
          $if DATATYPE == "S8":
            const int16x8_t vtd${ABC[C:C+8]} = vsubl_s8(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]});
            const int16x8_t vbd${ABC[C:C+8]} = vsubl_s8(vbr${ABC[C:C+8]}, vbl${ABC[C:C+8]});
            const int16x8_t vdl${ABC[C:C+8]} = vsubl_s8(vbl${ABC[C:C+8]}, vtl${ABC[C:C+8]});
            const int16x8_t vxtl${ABC[C:C+8]} = vmovl_s8(vtl${ABC[C:C+8]});
          $else:
            const int16x8_t vtd${ABC[C:C+8]} = vreinterpretq_s16_u16(vsubl_u8(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]}));
            const int16x8_t vbd${ABC[C:C+8]} = vreinterpretq_s16_u16(vsubl_u8(vbr${ABC[C:C+8]}, vbl${ABC[C:C+8]}));
            const int16x8_t vdl${ABC[C:C+8]} = vreinterpretq_s16_u16(vsubl_u8(vbl${ABC[C:C+8]}, vtl${ABC[C:C+8]}));
            const int16x8_t vxtl${ABC[C:C+8]} = vreinterpretq_s16_u16(vmovl_u8(vtl${ABC[C:C+8]}));

        $for C in range(0, CHANNEL_TILE, 8):
          const int16x8_t vdd${ABC[C:C+8]} = vsubq_s16(vbd${ABC[C:C+8]}, vtd${ABC[C:C+8]});

        #if XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vt${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl${ABC[C:C+8]}), 11), vget_low_s16(vtd${ABC[C:C+8]}), vget_low_s16(valphah));
            const int32x4_t vt${ABC[C+4:C+8]} = vmlal_high_s16(vshll_n_s16(vget_high_s16(vxtl${ABC[C:C+8]}), 11), vtd${ABC[C:C+8]}, valphah);

          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vd${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vdl${ABC[C:C+8]}), 11), vget_low_s16(vdd${ABC[C:C+8]}), vget_low_s16(valphah));
            const int32x4_t vd${ABC[C+4:C+8]} = vmlal_high_s16(vshll_n_s16(vget_high_s16(vdl${ABC[C:C+8]}), 11), vdd${ABC[C:C+8]}, valphah);
        #else  // !XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vt${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl${ABC[C:C+8]}), 11), vget_low_s16(vtd${ABC[C:C+8]}), valphah);
            const int32x4_t vt${ABC[C+4:C+8]} = vmlal_s16(vshll_n_s16(vget_high_s16(vxtl${ABC[C:C+8]}), 11), vget_high_s16(vtd${ABC[C:C+8]}), valphah);

          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vd${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vdl${ABC[C:C+8]}), 11), vget_low_s16(vdd${ABC[C:C+8]}), valphah);
            const int32x4_t vd${ABC[C+4:C+8]} = vmlal_s16(vshll_n_s16(vget_high_s16(vdl${ABC[C:C+8]}), 11), vget_high_s16(vdd${ABC[C:C+8]}), valphah);
        #endif  // !XNN_ARCH_ARM64

        $for C in range(0, CHANNEL_TILE, 8):
          const int32x4_t vacc${ABC[C:C+4]} = vmlaq_s32(vshlq_n_s32(vt${ABC[C:C+4]}, 11), vd${ABC[C:C+4]}, valphav);
          const int32x4_t vacc${ABC[C+4:C+8]} = vmlaq_s32(vshlq_n_s32(vt${ABC[C+4:C+8]}, 11), vd${ABC[C+4:C+8]}, valphav);

        #if XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int16x8_t vacc${ABC[C:C+8]} = vuzp2q_s16(vreinterpretq_s16_s32(vacc${ABC[C:C+4]}), vreinterpretq_s16_s32(vacc${ABC[C+4:C+8]}));
        #else  // !XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int16x8_t vacc${ABC[C:C+8]} = vcombine_s16(vshrn_n_s32(vacc${ABC[C:C+4]}, 16), vshrn_n_s32(vacc${ABC[C+4:C+8]}, 16));
        #endif  // !XNN_ARCH_ARM64

        $if DATATYPE == "S8":
          $for C in range(0, CHANNEL_TILE, 8):
            const int8x8_t vo${ABC[C:C+8]} = vrshrn_n_s16(vacc${ABC[C:C+8]}, 6);
        $else:
          $for C in range(0, CHANNEL_TILE, 8):
            const uint8x8_t vo${ABC[C:C+8]} = vrshrn_n_u16(vreinterpretq_u16_s16(vacc${ABC[C:C+8]}), 6);

        $for C in range(0, CHANNEL_TILE, 8):
          ${VST1_X8}(output, vo${ABC[C:C+8]}); output += 8;
      }
    for (; c >= 8 * sizeof(${XINT8_T}); c -= 8 * sizeof(${XINT8_T})) {
      const ${XINT8X8_T} vtl01234567 = ${VLD1_X8}(i0); i0 += 8;
      const ${XINT8X8_T} vtr01234567 = ${VLD1_X8}(i1); i1 += 8;
      const ${XINT8X8_T} vbl01234567 = ${VLD1_X8}(i2); i2 += 8;
      const ${XINT8X8_T} vbr01234567 = ${VLD1_X8}(i3); i3 += 8;

      $if DATATYPE == "S8":
        const int16x8_t vtd01234567 = vsubl_s8(vtr01234567, vtl01234567);
        const int16x8_t vbd01234567 = vsubl_s8(vbr01234567, vbl01234567);
        const int16x8_t vdl01234567 = vsubl_s8(vbl01234567, vtl01234567);
        const int16x8_t vxtl01234567 = vmovl_s8(vtl01234567);
      $else:
        const int16x8_t vtd01234567 = vreinterpretq_s16_u16(vsubl_u8(vtr01234567, vtl01234567));
        const int16x8_t vbd01234567 = vreinterpretq_s16_u16(vsubl_u8(vbr01234567, vbl01234567));
        const int16x8_t vdl01234567 = vreinterpretq_s16_u16(vsubl_u8(vbl01234567, vtl01234567));
        const int16x8_t vxtl01234567 = vreinterpretq_s16_u16(vmovl_u8(vtl01234567));

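      // Second-order term: vdd = (vbr - vbl) - (vtr - vtl) couples the
      // horizontal and vertical weights.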
      const int16x8_t vdd01234567 = vsubq_s16(vbd01234567, vtd01234567);

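      // Horizontal pass, scaled by 2^11: vt interpolates the top edge
      // (vtl << 11 plus vtd * alphah), vd interpolates the top-to-bottom
      // delta (vdl << 11 plus vdd * alphah).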
      #if XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), vget_low_s16(valphah));
        const int32x4_t vt4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vtd01234567, valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), vget_low_s16(valphah));
        const int32x4_t vd4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vdd01234567, valphah);
      #else  // !XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), valphah);
        const int32x4_t vt4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vget_high_s16(vtd01234567), valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), valphah);
        const int32x4_t vd4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vget_high_s16(vdd01234567), valphah);
      #endif  // !XNN_ARCH_ARM64

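      // Vertical pass: vacc = (vt << 11) + vd * alphav, i.e. the bilinear
      // result scaled by 2^22.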
      const int32x4_t vacc0123 = vmlaq_s32(vshlq_n_s32(vt0123, 11), vd0123, valphav);
      const int32x4_t vacc4567 = vmlaq_s32(vshlq_n_s32(vt4567, 11), vd4567, valphav);

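      // Keep the high 16 bits of each 32-bit lane (a shift right by 16):
      // vuzp2q_s16 extracts the odd (high) 16-bit halves on AArch64,
      // vshrn_n_s32 on ARMv7.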
      #if XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vuzp2q_s16(vreinterpretq_s16_s32(vacc0123), vreinterpretq_s16_s32(vacc4567));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vcombine_s16(vshrn_n_s32(vacc0123, 16), vshrn_n_s32(vacc4567, 16));
      #endif  // !XNN_ARCH_ARM64

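      // Rounding shift by the remaining 6 bits completes the 2^22 descale
      // (16 + 6 = 22) and narrows to 8 bits.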
      $if DATATYPE == "S8":
        const int8x8_t vo01234567 = vrshrn_n_s16(vacc01234567, 6);
      $else:
        const uint8x8_t vo01234567 = vrshrn_n_u16(vreinterpretq_u16_s16(vacc01234567), 6);

      ${VST1_X8}(output, vo01234567); output += 8;
    }
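    // Remainder of 1-7 channels: full 8-byte vectors are still loaded, so the
    // kernel may read a few bytes past the end of each row (hence the
    // XNN_OOB_READS annotation on the function).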
    if XNN_UNLIKELY(c != 0) {
      const ${XINT8X8_T} vtl01234567 = ${VLD1_X8}(i0);
      const ${XINT8X8_T} vtr01234567 = ${VLD1_X8}(i1);
      const ${XINT8X8_T} vbl01234567 = ${VLD1_X8}(i2);
      const ${XINT8X8_T} vbr01234567 = ${VLD1_X8}(i3);

      $if DATATYPE == "S8":
        const int16x8_t vtd01234567 = vsubl_s8(vtr01234567, vtl01234567);
        const int16x8_t vbd01234567 = vsubl_s8(vbr01234567, vbl01234567);
        const int16x8_t vdl01234567 = vsubl_s8(vbl01234567, vtl01234567);
        const int16x8_t vxtl01234567 = vmovl_s8(vtl01234567);
      $else:
        const int16x8_t vtd01234567 = vreinterpretq_s16_u16(vsubl_u8(vtr01234567, vtl01234567));
        const int16x8_t vbd01234567 = vreinterpretq_s16_u16(vsubl_u8(vbr01234567, vbl01234567));
        const int16x8_t vdl01234567 = vreinterpretq_s16_u16(vsubl_u8(vbl01234567, vtl01234567));
        const int16x8_t vxtl01234567 = vreinterpretq_s16_u16(vmovl_u8(vtl01234567));

      const int16x8_t vdd01234567 = vsubq_s16(vbd01234567, vtd01234567);

      #if XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), vget_low_s16(valphah));
        const int32x4_t vt4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vtd01234567, valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), vget_low_s16(valphah));
        const int32x4_t vd4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vdd01234567, valphah);
      #else  // !XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), valphah);
        const int32x4_t vt4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vget_high_s16(vtd01234567), valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), valphah);
        const int32x4_t vd4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vget_high_s16(vdd01234567), valphah);
      #endif  // !XNN_ARCH_ARM64

      const int32x4_t vacc0123 = vmlaq_s32(vshlq_n_s32(vt0123, 11), vd0123, valphav);
      const int32x4_t vacc4567 = vmlaq_s32(vshlq_n_s32(vt4567, 11), vd4567, valphav);

      #if XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vuzp2q_s16(vreinterpretq_s16_s32(vacc0123), vreinterpretq_s16_s32(vacc4567));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vcombine_s16(vshrn_n_s32(vacc0123, 16), vshrn_n_s32(vacc4567, 16));
      #endif  // !XNN_ARCH_ARM64

      $if DATATYPE == "S8":
        int8x8_t vo01234567 = vrshrn_n_s16(vacc01234567, 6);
      $else:
        uint8x8_t vo01234567 = vrshrn_n_u16(vreinterpretq_u16_s16(vacc01234567), 6);

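      // Store the remaining 1-7 channels in 4-, 2-, and 1-element chunks,
      // rotating the vector after each partial store.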
      if (c & (4 * sizeof(${XINT8_T}))) {
        vst1_lane_u32((void*) output, ${VREINTERPRET_U32_X8}(vo01234567), 0); output += 4;
        vo01234567 = ${VEXT_X8}(vo01234567, vo01234567, 4);
      }
      if (c & (2 * sizeof(${XINT8_T}))) {
        vst1_lane_u16((void*) output, ${VREINTERPRET_U16_X8}(vo01234567), 0); output += 2;
        vo01234567 = ${VEXT_X8}(vo01234567, vo01234567, 2);
      }
      if (c & (1 * sizeof(${XINT8_T}))) {
        ${VST1_LANE_X8}(output, vo01234567, 0); output += 1;
      }
    }

    output = (${XINT8_T}*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}