/*
 * Copyright (c) 2021 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx/vpx_integer.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/sum_neon.h"

static INLINE unsigned int sadwxh_neon_dotprod(const uint8_t *src_ptr,
                                               int src_stride,
                                               const uint8_t *ref_ptr,
                                               int ref_stride, int w, int h) {
  // Only two accumulators are required for optimal instruction throughput of
  // the ABD, UDOT sequence on CPUs with either 2 or 4 Neon pipes.
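  // Dotting each block of absolute differences against a vector of ones makes
  // UDOT sum every group of four bytes straight into a 32-bit accumulator
  // lane, so the 8-bit ABD results never need intermediate 16-bit widening.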
  uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = h;
  do {
    int j = 0;
    do {
      uint8x16_t s0, s1, r0, r1, diff0, diff1;

      s0 = vld1q_u8(src_ptr + j);
      r0 = vld1q_u8(ref_ptr + j);
      diff0 = vabdq_u8(s0, r0);
      sum[0] = vdotq_u32(sum[0], diff0, vdupq_n_u8(1));

      s1 = vld1q_u8(src_ptr + j + 16);
      r1 = vld1q_u8(ref_ptr + j + 16);
      diff1 = vabdq_u8(s1, r1);
      sum[1] = vdotq_u32(sum[1], diff1, vdupq_n_u8(1));

      j += 32;
    } while (j < w);

    src_ptr += src_stride;
    ref_ptr += ref_stride;
  } while (--i != 0);

  return horizontal_add_uint32x4(vaddq_u32(sum[0], sum[1]));
}

static INLINE unsigned int sad64xh_neon_dotprod(const uint8_t *src_ptr,
                                                int src_stride,
                                                const uint8_t *ref_ptr,
                                                int ref_stride, int h) {
  return sadwxh_neon_dotprod(src_ptr, src_stride, ref_ptr, ref_stride, 64, h);
}

static INLINE unsigned int sad32xh_neon_dotprod(const uint8_t *src_ptr,
                                                int src_stride,
                                                const uint8_t *ref_ptr,
                                                int ref_stride, int h) {
  return sadwxh_neon_dotprod(src_ptr, src_stride, ref_ptr, ref_stride, 32, h);
}

static INLINE unsigned int sad16xh_neon_dotprod(const uint8_t *src_ptr,
                                                int src_stride,
                                                const uint8_t *ref_ptr,
                                                int ref_stride, int h) {
  uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

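  // Two rows are processed per iteration, one row per accumulator; every
  // 16-wide block height instantiated below is even, so h / 2 iterations
  // cover the whole block.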
  int i = h / 2;
  do {
    uint8x16_t s0, s1, r0, r1, diff0, diff1;

    s0 = vld1q_u8(src_ptr);
    r0 = vld1q_u8(ref_ptr);
    diff0 = vabdq_u8(s0, r0);
    sum[0] = vdotq_u32(sum[0], diff0, vdupq_n_u8(1));

    src_ptr += src_stride;
    ref_ptr += ref_stride;

    s1 = vld1q_u8(src_ptr);
    r1 = vld1q_u8(ref_ptr);
    diff1 = vabdq_u8(s1, r1);
    sum[1] = vdotq_u32(sum[1], diff1, vdupq_n_u8(1));

    src_ptr += src_stride;
    ref_ptr += ref_stride;
  } while (--i != 0);

  return horizontal_add_uint32x4(vaddq_u32(sum[0], sum[1]));
}

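// The macro below stamps out the exported vpx_sad{w}x{h}_neon_dotprod entry
// points whose prototypes come from ./vpx_dsp_rtcd.h; the run-time CPU
// detection selects them when the Armv8.2-A dot-product (UDOT) extension is
// available.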
#define SAD_WXH_NEON_DOTPROD(w, h)                                          \
  unsigned int vpx_sad##w##x##h##_neon_dotprod(                             \
      const uint8_t *src, int src_stride, const uint8_t *ref,               \
      int ref_stride) {                                                     \
    return sad##w##xh_neon_dotprod(src, src_stride, ref, ref_stride, (h));  \
  }

SAD_WXH_NEON_DOTPROD(16, 8)
SAD_WXH_NEON_DOTPROD(16, 16)
SAD_WXH_NEON_DOTPROD(16, 32)

SAD_WXH_NEON_DOTPROD(32, 16)
SAD_WXH_NEON_DOTPROD(32, 32)
SAD_WXH_NEON_DOTPROD(32, 64)

SAD_WXH_NEON_DOTPROD(64, 32)
SAD_WXH_NEON_DOTPROD(64, 64)

#undef SAD_WXH_NEON_DOTPROD

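// The "skip" variants double the row strides so only every other row
// contributes to the sum, then double the result to keep it on the same
// scale as the full SAD. This halves the cost of the comparison at the price
// of a slightly coarser distortion estimate.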
#define SAD_SKIP_WXH_NEON_DOTPROD(w, h)                             \
  unsigned int vpx_sad_skip_##w##x##h##_neon_dotprod(               \
      const uint8_t *src, int src_stride, const uint8_t *ref,       \
      int ref_stride) {                                             \
    return 2 * sad##w##xh_neon_dotprod(src, 2 * src_stride, ref,    \
                                       2 * ref_stride, (h) / 2);    \
  }

SAD_SKIP_WXH_NEON_DOTPROD(16, 8)
SAD_SKIP_WXH_NEON_DOTPROD(16, 16)
SAD_SKIP_WXH_NEON_DOTPROD(16, 32)

SAD_SKIP_WXH_NEON_DOTPROD(32, 16)
SAD_SKIP_WXH_NEON_DOTPROD(32, 32)
SAD_SKIP_WXH_NEON_DOTPROD(32, 64)

SAD_SKIP_WXH_NEON_DOTPROD(64, 32)
SAD_SKIP_WXH_NEON_DOTPROD(64, 64)

#undef SAD_SKIP_WXH_NEON_DOTPROD

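// The *_avg kernels compare src against the rounding average of ref and
// second_pred: vrhaddq_u8 computes (a + b + 1) >> 1 per byte, as used for the
// encoder's averaged (second) prediction. second_pred is read contiguously,
// w bytes per row.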
static INLINE unsigned int sadwxh_avg_neon_dotprod(const uint8_t *src_ptr,
                                                   int src_stride,
                                                   const uint8_t *ref_ptr,
                                                   int ref_stride, int w, int h,
                                                   const uint8_t *second_pred) {
  // Only two accumulators are required for optimal instruction throughput of
  // the ABD, UDOT sequence on CPUs with either 2 or 4 Neon pipes.
  uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = h;
  do {
    int j = 0;
    do {
      uint8x16_t s0, s1, r0, r1, p0, p1, avg0, avg1, diff0, diff1;

      s0 = vld1q_u8(src_ptr + j);
      r0 = vld1q_u8(ref_ptr + j);
      p0 = vld1q_u8(second_pred);
      avg0 = vrhaddq_u8(r0, p0);
      diff0 = vabdq_u8(s0, avg0);
      sum[0] = vdotq_u32(sum[0], diff0, vdupq_n_u8(1));

      s1 = vld1q_u8(src_ptr + j + 16);
      r1 = vld1q_u8(ref_ptr + j + 16);
      p1 = vld1q_u8(second_pred + 16);
      avg1 = vrhaddq_u8(r1, p1);
      diff1 = vabdq_u8(s1, avg1);
      sum[1] = vdotq_u32(sum[1], diff1, vdupq_n_u8(1));

      j += 32;
      second_pred += 32;
    } while (j < w);

    src_ptr += src_stride;
    ref_ptr += ref_stride;
  } while (--i != 0);

  return horizontal_add_uint32x4(vaddq_u32(sum[0], sum[1]));
}

static INLINE unsigned int sad64xh_avg_neon_dotprod(
    const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,
    int ref_stride, int h, const uint8_t *second_pred) {
  return sadwxh_avg_neon_dotprod(src_ptr, src_stride, ref_ptr, ref_stride, 64,
                                 h, second_pred);
}

static INLINE unsigned int sad32xh_avg_neon_dotprod(
    const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,
    int ref_stride, int h, const uint8_t *second_pred) {
  return sadwxh_avg_neon_dotprod(src_ptr, src_stride, ref_ptr, ref_stride, 32,
                                 h, second_pred);
}

static INLINE unsigned int sad16xh_avg_neon_dotprod(
    const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,
    int ref_stride, int h, const uint8_t *second_pred) {
  uint32x4_t sum[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = h / 2;
  do {
    uint8x16_t s0, s1, r0, r1, p0, p1, avg0, avg1, diff0, diff1;

    s0 = vld1q_u8(src_ptr);
    r0 = vld1q_u8(ref_ptr);
    p0 = vld1q_u8(second_pred);
    avg0 = vrhaddq_u8(r0, p0);
    diff0 = vabdq_u8(s0, avg0);
    sum[0] = vdotq_u32(sum[0], diff0, vdupq_n_u8(1));

    src_ptr += src_stride;
    ref_ptr += ref_stride;
    second_pred += 16;

    s1 = vld1q_u8(src_ptr);
    r1 = vld1q_u8(ref_ptr);
    p1 = vld1q_u8(second_pred);
    avg1 = vrhaddq_u8(r1, p1);
    diff1 = vabdq_u8(s1, avg1);
    sum[1] = vdotq_u32(sum[1], diff1, vdupq_n_u8(1));

    src_ptr += src_stride;
    ref_ptr += ref_stride;
    second_pred += 16;
  } while (--i != 0);

  return horizontal_add_uint32x4(vaddq_u32(sum[0], sum[1]));
}

#define SAD_WXH_AVG_NEON_DOTPROD(w, h)                                        \
  uint32_t vpx_sad##w##x##h##_avg_neon_dotprod(                               \
      const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
      const uint8_t *second_pred) {                                           \
    return sad##w##xh_avg_neon_dotprod(src, src_stride, ref, ref_stride, (h), \
                                       second_pred);                          \
  }

SAD_WXH_AVG_NEON_DOTPROD(16, 8)
SAD_WXH_AVG_NEON_DOTPROD(16, 16)
SAD_WXH_AVG_NEON_DOTPROD(16, 32)

SAD_WXH_AVG_NEON_DOTPROD(32, 16)
SAD_WXH_AVG_NEON_DOTPROD(32, 32)
SAD_WXH_AVG_NEON_DOTPROD(32, 64)

SAD_WXH_AVG_NEON_DOTPROD(64, 32)
SAD_WXH_AVG_NEON_DOTPROD(64, 64)

#undef SAD_WXH_AVG_NEON_DOTPROD