/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>

#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "warp_plane_neon.h"

DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
  0, 1, 2,  3,  1, 2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6,
  4, 5, 6,  7,  5, 6,  7,  8,  6,  7,  8,  9,  7,  8,  9,  10,
  8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};

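// Filter four horizontal output pixels, each with its own 8-tap filter (the
// filter phase advances by alpha from one pixel to the next), using USDOT
// (unsigned pixel x signed filter) dot products. The four results are
// returned in the low half of the vector; the high half is zero.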
static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
                                                           int sx, int alpha) {
  // Only put the constant in every other lane to avoid double-counting when
  // performing the pairwise add later.
  const int32x4_t add_const =
      vreinterpretq_s32_u64(vdupq_n_u64(1 << (8 + FILTER_BITS - 1)));

  // Load the four 8-tap filters.
  int16x8_t f[4];
  load_filters_4(f, sx, alpha);

  int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1]));
  int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3]));

  uint8x8_t in0 = vget_low_u8(in);
  uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1));
  uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2));
  uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3));

  int32x4_t m01 = vusdotq_s32(add_const, vcombine_u8(in0, in1), f01_u8);
  int32x4_t m23 = vusdotq_s32(add_const, vcombine_u8(in2, in3), f23_u8);

  int32x4_t m0123 = vpaddq_s32(m01, m23);

  uint16x8_t res =
      vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS), vdup_n_u16(0));
  return vreinterpretq_s16_u16(res);
}

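// As horizontal_filter_4x1_f4, but produces eight output pixels per call,
// consuming the low fifteen bytes of `in`.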
static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
                                                           int sx, int alpha) {
  // Only put the constant in every other lane to avoid double-counting when
  // performing the pairwise add later.
  const int32x4_t add_const =
      vreinterpretq_s32_u64(vdupq_n_u64(1 << (8 + FILTER_BITS - 1)));

  // Load the eight 8-tap filters.
  int16x8_t f[8];
  load_filters_8(f, sx, alpha);

  int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1]));
  int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3]));
  int8x16_t f45_u8 = vcombine_s8(vmovn_s16(f[4]), vmovn_s16(f[5]));
  int8x16_t f67_u8 = vcombine_s8(vmovn_s16(f[6]), vmovn_s16(f[7]));

  uint8x8_t in0 = vget_low_u8(in);
  uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1));
  uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2));
  uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3));
  uint8x8_t in4 = vget_low_u8(vextq_u8(in, in, 4));
  uint8x8_t in5 = vget_low_u8(vextq_u8(in, in, 5));
  uint8x8_t in6 = vget_low_u8(vextq_u8(in, in, 6));
  uint8x8_t in7 = vget_low_u8(vextq_u8(in, in, 7));

  int32x4_t m01 = vusdotq_s32(add_const, vcombine_u8(in0, in1), f01_u8);
  int32x4_t m23 = vusdotq_s32(add_const, vcombine_u8(in2, in3), f23_u8);
  int32x4_t m45 = vusdotq_s32(add_const, vcombine_u8(in4, in5), f45_u8);
  int32x4_t m67 = vusdotq_s32(add_const, vcombine_u8(in6, in7), f67_u8);

  int32x4_t m0123 = vpaddq_s32(m01, m23);
  int32x4_t m4567 = vpaddq_s32(m45, m67);

  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS),
                                vqrshrun_n_s32(m4567, ROUND0_BITS));
  return vreinterpretq_s16_u16(res);
}

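// Shared-filter variant used on the alpha == 0 path: all four output pixels
// use the same 8-tap filter, and the _beta0 suffix indicates the caller has
// hoisted the filter load out of the row loop (beta == 0 keeps the phase
// constant across rows). A TBL permute through usdot_permute_idx gathers the
// sliding 4-byte windows so two USDOT instructions cover all four pixels.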
static AOM_FORCE_INLINE int16x8_t
horizontal_filter_4x1_f1_beta0(const uint8x16_t in, int16x8_t f_s16) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16));

  uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]);
  uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]);

  // Permute samples ready for dot product.
  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
  // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
  uint8x16_t in_0123 = vqtbl1q_u8(in, perm0);
  uint8x16_t in_4567 = vqtbl1q_u8(in, perm1);

  int32x4_t m0123 = vusdotq_laneq_s32(add_const, in_0123, f_s8, 0);
  m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1);

  uint16x8_t res =
      vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS), vdup_n_u16(0));
  return vreinterpretq_s16_u16(res);
}

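// As above, but loads the shared filter for the current sx from
// av1_warped_filter before delegating to the _beta0 kernel.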
static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
                                                           int sx) {
  int16x8_t f_s16 =
      vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS)));
  return horizontal_filter_4x1_f1_beta0(in, f_s16);
}

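// Eight-pixel counterpart of horizontal_filter_4x1_f1_beta0: three permuted
// input vectors feed four USDOT instructions, two per group of four pixels.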
static AOM_FORCE_INLINE int16x8_t
horizontal_filter_8x1_f1_beta0(const uint8x16_t in, int16x8_t f_s16) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16));

  uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]);
  uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]);
  uint8x16_t perm2 = vld1q_u8(&usdot_permute_idx[32]);

  // Permute samples ready for dot product.
  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
  // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
  // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
  uint8x16_t in_0123 = vqtbl1q_u8(in, perm0);
  uint8x16_t in_4567 = vqtbl1q_u8(in, perm1);
  uint8x16_t in_89ab = vqtbl1q_u8(in, perm2);

  int32x4_t m0123 = vusdotq_laneq_s32(add_const, in_0123, f_s8, 0);
  m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1);

  int32x4_t m4567 = vusdotq_laneq_s32(add_const, in_4567, f_s8, 0);
  m4567 = vusdotq_laneq_s32(m4567, in_89ab, f_s8, 1);

  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS),
                                vqrshrun_n_s32(m4567, ROUND0_BITS));
  return vreinterpretq_s16_u16(res);
}

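// Eight-pixel counterpart of horizontal_filter_4x1_f1: load the shared
// filter for sx, then delegate to the _beta0 kernel.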
static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
                                                           int sx) {
  int16x8_t f_s16 =
      vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS)));
  return horizontal_filter_8x1_f1_beta0(in, f_s16);
}

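// Vertical pass over four columns with a single shared 8-tap filter (the
// gamma == 0 path): a straightforward widening multiply-accumulate down the
// eight source rows.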
static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
                                                    int32x4_t *res, int sy) {
  int16x4_t s0 = vget_low_s16(src[0]);
  int16x4_t s1 = vget_low_s16(src[1]);
  int16x4_t s2 = vget_low_s16(src[2]);
  int16x4_t s3 = vget_low_s16(src[3]);
  int16x4_t s4 = vget_low_s16(src[4]);
  int16x4_t s5 = vget_low_s16(src[5]);
  int16x4_t s6 = vget_low_s16(src[6]);
  int16x4_t s7 = vget_low_s16(src[7]);

  int16x8_t f =
      vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));

  int32x4_t m0123 = vmull_lane_s16(s0, vget_low_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, s1, vget_low_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, s2, vget_low_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, s3, vget_low_s16(f), 3);
  m0123 = vmlal_lane_s16(m0123, s4, vget_high_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, s5, vget_high_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, s6, vget_high_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, s7, vget_high_s16(f), 3);

  *res = m0123;
}

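// Vertical pass over four columns, each with its own filter (phase advancing
// by gamma per column). Transposing the 4x8 tile turns each column into one
// SVE 64-bit-accumulating dot product (aom_sdotq_s16); pairwise adds then
// fold the two partial sums per column before narrowing to 32 bits.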
static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
                                                    int32x4_t *res, int sy,
                                                    int gamma) {
  int16x8_t s0, s1, s2, s3;
  transpose_elems_s16_4x8(
      vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
      vget_low_s16(src[3]), vget_low_s16(src[4]), vget_low_s16(src[5]),
      vget_low_s16(src[6]), vget_low_s16(src[7]), &s0, &s1, &s2, &s3);

  int16x8_t f[4];
  load_filters_4(f, sy, gamma);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), s1, f[1]);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), s2, f[2]);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), s3, f[3]);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);

  *res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}

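// Vertical pass over eight columns with a single shared filter: as the
// four-column f1 kernel, processing the low and high halves of each row.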
static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
                                                    int32x4_t *res_low,
                                                    int32x4_t *res_high,
                                                    int sy) {
  int16x8_t s0 = src[0];
  int16x8_t s1 = src[1];
  int16x8_t s2 = src[2];
  int16x8_t s3 = src[3];
  int16x8_t s4 = src[4];
  int16x8_t s5 = src[5];
  int16x8_t s6 = src[6];
  int16x8_t s7 = src[7];

  int16x8_t f =
      vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));

  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(s0), vget_low_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s1), vget_low_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s2), vget_low_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s3), vget_low_s16(f), 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s4), vget_high_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s5), vget_high_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s6), vget_high_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s7), vget_high_s16(f), 3);

  int32x4_t m4567 = vmull_lane_s16(vget_high_s16(s0), vget_low_s16(f), 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s1), vget_low_s16(f), 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s2), vget_low_s16(f), 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s3), vget_low_s16(f), 3);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s4), vget_high_s16(f), 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s5), vget_high_s16(f), 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s6), vget_high_s16(f), 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s7), vget_high_s16(f), 3);

  *res_low = m0123;
  *res_high = m4567;
}

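// Vertical pass over eight columns with eight distinct filters: transpose
// the 8x8 tile, take one SVE dot product per column, and fold the partial
// sums pairwise as in vertical_filter_4x1_f4.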
static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
                                                    int32x4_t *res_low,
                                                    int32x4_t *res_high, int sy,
                                                    int gamma) {
  int16x8_t s0 = src[0];
  int16x8_t s1 = src[1];
  int16x8_t s2 = src[2];
  int16x8_t s3 = src[3];
  int16x8_t s4 = src[4];
  int16x8_t s5 = src[5];
  int16x8_t s6 = src[6];
  int16x8_t s7 = src[7];
  transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  int16x8_t f[8];
  load_filters_8(f, sy, gamma);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), s1, f[1]);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), s2, f[2]);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), s3, f[3]);
  int64x2_t m4 = aom_sdotq_s16(vdupq_n_s64(0), s4, f[4]);
  int64x2_t m5 = aom_sdotq_s16(vdupq_n_s64(0), s5, f[5]);
  int64x2_t m6 = aom_sdotq_s16(vdupq_n_s64(0), s6, f[6]);
  int64x2_t m7 = aom_sdotq_s16(vdupq_n_s64(0), s7, f[7]);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);
  int64x2_t m45 = vpaddq_s64(m4, m5);
  int64x2_t m67 = vpaddq_s64(m6, m7);

  *res_low = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
  *res_high = vcombine_s32(vmovn_s64(m45), vmovn_s64(m67));
}

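// Entry point. av1_warp_affine_common() (provided by warp_plane_neon.h)
// implements the generic warp loop in terms of the kernels defined above.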
void av1_warp_affine_sve(const int32_t *mat, const uint8_t *ref, int width,
                         int height, int stride, uint8_t *pred, int p_col,
                         int p_row, int p_width, int p_height, int p_stride,
                         int subsampling_x, int subsampling_y,
                         ConvolveParams *conv_params, int16_t alpha,
                         int16_t beta, int16_t gamma, int16_t delta) {
  av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row,
                         p_width, p_height, p_stride, subsampling_x,
                         subsampling_y, conv_params, alpha, beta, gamma, delta);
}