// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// ARM NEON version of dsp functions and loop filtering.
//
// Authors: Somnath Banerjee ([email protected])
//          Johann Koenig ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_NEON)

#include "src/dsp/neon.h"
#include "src/dec/vp8i_dec.h"

//------------------------------------------------------------------------------
// NxM Loading functions

#if !defined(WORK_AROUND_GCC)

// This intrinsics version makes gcc-4.6.3 crash during Load4x??() compilation
// (register alloc, probably). The variants somewhat mitigate the problem, but
// not quite. HFilter16i() remains problematic.
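// Load a 4-wide, 8-row block: vld4_lane_u8() de-interleaves four consecutive
// bytes per row across the lanes of out.val[0..3], so the eight strided loads
// below effectively transpose the block and out.val[k] ends up holding
// column k.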
static WEBP_INLINE uint8x8x4_t Load4x8_NEON(const uint8_t* const src,
                                            int stride) {
  const uint8x8_t zero = vdup_n_u8(0);
  uint8x8x4_t out;
  INIT_VECTOR4(out, zero, zero, zero, zero);
  out = vld4_lane_u8(src + 0 * stride, out, 0);
  out = vld4_lane_u8(src + 1 * stride, out, 1);
  out = vld4_lane_u8(src + 2 * stride, out, 2);
  out = vld4_lane_u8(src + 3 * stride, out, 3);
  out = vld4_lane_u8(src + 4 * stride, out, 4);
  out = vld4_lane_u8(src + 5 * stride, out, 5);
  out = vld4_lane_u8(src + 6 * stride, out, 6);
  out = vld4_lane_u8(src + 7 * stride, out, 7);
  return out;
}

static WEBP_INLINE void Load4x16_NEON(const uint8_t* const src, int stride,
                                      uint8x16_t* const p1,
                                      uint8x16_t* const p0,
                                      uint8x16_t* const q0,
                                      uint8x16_t* const q1) {
  // row0 = p1[0..7]|p0[0..7]|q0[0..7]|q1[0..7]
  // row8 = p1[8..15]|p0[8..15]|q0[8..15]|q1[8..15]
  const uint8x8x4_t row0 = Load4x8_NEON(src - 2 + 0 * stride, stride);
  const uint8x8x4_t row8 = Load4x8_NEON(src - 2 + 8 * stride, stride);
  *p1 = vcombine_u8(row0.val[0], row8.val[0]);
  *p0 = vcombine_u8(row0.val[1], row8.val[1]);
  *q0 = vcombine_u8(row0.val[2], row8.val[2]);
  *q1 = vcombine_u8(row0.val[3], row8.val[3]);
}

#else  // WORK_AROUND_GCC

#define LOADQ_LANE_32b(VALUE, LANE) do {                             \
  (VALUE) = vld1q_lane_u32((const uint32_t*)src, (VALUE), (LANE));   \
  src += stride;                                                     \
} while (0)

static WEBP_INLINE void Load4x16_NEON(const uint8_t* src, int stride,
                                      uint8x16_t* const p1,
                                      uint8x16_t* const p0,
                                      uint8x16_t* const q0,
                                      uint8x16_t* const q1) {
  const uint32x4_t zero = vdupq_n_u32(0);
  uint32x4x4_t in;
  INIT_VECTOR4(in, zero, zero, zero, zero);
  src -= 2;
  LOADQ_LANE_32b(in.val[0], 0);
  LOADQ_LANE_32b(in.val[1], 0);
  LOADQ_LANE_32b(in.val[2], 0);
  LOADQ_LANE_32b(in.val[3], 0);
  LOADQ_LANE_32b(in.val[0], 1);
  LOADQ_LANE_32b(in.val[1], 1);
  LOADQ_LANE_32b(in.val[2], 1);
  LOADQ_LANE_32b(in.val[3], 1);
  LOADQ_LANE_32b(in.val[0], 2);
  LOADQ_LANE_32b(in.val[1], 2);
  LOADQ_LANE_32b(in.val[2], 2);
  LOADQ_LANE_32b(in.val[3], 2);
  LOADQ_LANE_32b(in.val[0], 3);
  LOADQ_LANE_32b(in.val[1], 3);
  LOADQ_LANE_32b(in.val[2], 3);
  LOADQ_LANE_32b(in.val[3], 3);
  // Transpose four 4x4 parts:
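  // (each 32-bit lane of in.val[k] holds one 4-pixel row; the u8 and u16
  // vtrnq steps below transpose every 4x4 byte tile, leaving column k of the
  // 4x16 block in the k-th output vector)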
  {
    const uint8x16x2_t row01 = vtrnq_u8(vreinterpretq_u8_u32(in.val[0]),
                                        vreinterpretq_u8_u32(in.val[1]));
    const uint8x16x2_t row23 = vtrnq_u8(vreinterpretq_u8_u32(in.val[2]),
                                        vreinterpretq_u8_u32(in.val[3]));
    const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]),
                                         vreinterpretq_u16_u8(row23.val[0]));
    const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]),
                                         vreinterpretq_u16_u8(row23.val[1]));
    *p1 = vreinterpretq_u8_u16(row02.val[0]);
    *p0 = vreinterpretq_u8_u16(row13.val[0]);
    *q0 = vreinterpretq_u8_u16(row02.val[1]);
    *q1 = vreinterpretq_u8_u16(row13.val[1]);
  }
}
#undef LOADQ_LANE_32b

#endif  // !WORK_AROUND_GCC

static WEBP_INLINE void Load8x16_NEON(
    const uint8_t* const src, int stride,
    uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
    uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
    uint8x16_t* const q2, uint8x16_t* const q3) {
  Load4x16_NEON(src - 2, stride, p3, p2, p1, p0);
  Load4x16_NEON(src + 2, stride, q0, q1, q2, q3);
}

static WEBP_INLINE void Load16x4_NEON(const uint8_t* const src, int stride,
                                      uint8x16_t* const p1,
                                      uint8x16_t* const p0,
                                      uint8x16_t* const q0,
                                      uint8x16_t* const q1) {
  *p1 = vld1q_u8(src - 2 * stride);
  *p0 = vld1q_u8(src - 1 * stride);
  *q0 = vld1q_u8(src + 0 * stride);
  *q1 = vld1q_u8(src + 1 * stride);
}

static WEBP_INLINE void Load16x8_NEON(
    const uint8_t* const src, int stride,
    uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
    uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
    uint8x16_t* const q2, uint8x16_t* const q3) {
  Load16x4_NEON(src - 2 * stride, stride, p3, p2, p1, p0);
  Load16x4_NEON(src + 2 * stride, stride, q0, q1, q2, q3);
}

static WEBP_INLINE void Load8x8x2_NEON(
    const uint8_t* const u, const uint8_t* const v, int stride,
    uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
    uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
    uint8x16_t* const q2, uint8x16_t* const q3) {
  // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination
  // and the v-samples in the upper half.
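  // (packing both chroma planes into one register lets a single 16-lane
  // filtering pass process U and V together)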
  *p3 = vcombine_u8(vld1_u8(u - 4 * stride), vld1_u8(v - 4 * stride));
  *p2 = vcombine_u8(vld1_u8(u - 3 * stride), vld1_u8(v - 3 * stride));
  *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride));
  *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride));
  *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride));
  *q1 = vcombine_u8(vld1_u8(u + 1 * stride), vld1_u8(v + 1 * stride));
  *q2 = vcombine_u8(vld1_u8(u + 2 * stride), vld1_u8(v + 2 * stride));
  *q3 = vcombine_u8(vld1_u8(u + 3 * stride), vld1_u8(v + 3 * stride));
}

#if !defined(WORK_AROUND_GCC)

#define LOAD_UV_8(ROW) \
  vcombine_u8(vld1_u8(u - 4 + (ROW) * stride), vld1_u8(v - 4 + (ROW) * stride))

static WEBP_INLINE void Load8x8x2T_NEON(
    const uint8_t* const u, const uint8_t* const v, int stride,
    uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
    uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
    uint8x16_t* const q2, uint8x16_t* const q3) {
  // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination
  // and the v-samples in the upper half.
  const uint8x16_t row0 = LOAD_UV_8(0);
  const uint8x16_t row1 = LOAD_UV_8(1);
  const uint8x16_t row2 = LOAD_UV_8(2);
  const uint8x16_t row3 = LOAD_UV_8(3);
  const uint8x16_t row4 = LOAD_UV_8(4);
  const uint8x16_t row5 = LOAD_UV_8(5);
  const uint8x16_t row6 = LOAD_UV_8(6);
  const uint8x16_t row7 = LOAD_UV_8(7);
  // Perform two side-by-side 8x8 transposes
  // u00 u01 u02 u03 u04 u05 u06 u07 | v00 v01 v02 v03 v04 v05 v06 v07
  // u10 u11 u12 u13 u14 u15 u16 u17 | v10 v11 v12 ...
  // u20 u21 u22 u23 u24 u25 u26 u27 | v20 v21 ...
  // u30 u31 u32 u33 u34 u35 u36 u37 | ...
  // u40 u41 u42 u43 u44 u45 u46 u47 | ...
  // u50 u51 u52 u53 u54 u55 u56 u57 | ...
  // u60 u61 u62 u63 u64 u65 u66 u67 | v60 ...
  // u70 u71 u72 u73 u74 u75 u76 u77 | v70 v71 v72 ...
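  // (three vtrnq stages, on u8, u16 and u32 elements, swap values at row
  // distances 1, 2 and 4; combined they transpose each 8x8 half in place)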
  const uint8x16x2_t row01 = vtrnq_u8(row0, row1);   // u00 u10 u02 u12 ...
                                                     // u01 u11 u03 u13 ...
  const uint8x16x2_t row23 = vtrnq_u8(row2, row3);   // u20 u30 u22 u32 ...
                                                     // u21 u31 u23 u33 ...
  const uint8x16x2_t row45 = vtrnq_u8(row4, row5);   // ...
  const uint8x16x2_t row67 = vtrnq_u8(row6, row7);   // ...
  const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]),
                                       vreinterpretq_u16_u8(row23.val[0]));
  const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]),
                                       vreinterpretq_u16_u8(row23.val[1]));
  const uint16x8x2_t row46 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[0]),
                                       vreinterpretq_u16_u8(row67.val[0]));
  const uint16x8x2_t row57 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[1]),
                                       vreinterpretq_u16_u8(row67.val[1]));
  const uint32x4x2_t row04 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[0]),
                                       vreinterpretq_u32_u16(row46.val[0]));
  const uint32x4x2_t row26 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[1]),
                                       vreinterpretq_u32_u16(row46.val[1]));
  const uint32x4x2_t row15 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[0]),
                                       vreinterpretq_u32_u16(row57.val[0]));
  const uint32x4x2_t row37 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[1]),
                                       vreinterpretq_u32_u16(row57.val[1]));
  *p3 = vreinterpretq_u8_u32(row04.val[0]);
  *p2 = vreinterpretq_u8_u32(row15.val[0]);
  *p1 = vreinterpretq_u8_u32(row26.val[0]);
  *p0 = vreinterpretq_u8_u32(row37.val[0]);
  *q0 = vreinterpretq_u8_u32(row04.val[1]);
  *q1 = vreinterpretq_u8_u32(row15.val[1]);
  *q2 = vreinterpretq_u8_u32(row26.val[1]);
  *q3 = vreinterpretq_u8_u32(row37.val[1]);
}
#undef LOAD_UV_8

#endif  // !WORK_AROUND_GCC

static WEBP_INLINE void Store2x8_NEON(const uint8x8x2_t v,
                                      uint8_t* const dst, int stride) {
  vst2_lane_u8(dst + 0 * stride, v, 0);
  vst2_lane_u8(dst + 1 * stride, v, 1);
  vst2_lane_u8(dst + 2 * stride, v, 2);
  vst2_lane_u8(dst + 3 * stride, v, 3);
  vst2_lane_u8(dst + 4 * stride, v, 4);
  vst2_lane_u8(dst + 5 * stride, v, 5);
  vst2_lane_u8(dst + 6 * stride, v, 6);
  vst2_lane_u8(dst + 7 * stride, v, 7);
}

static WEBP_INLINE void Store2x16_NEON(const uint8x16_t p0, const uint8x16_t q0,
                                       uint8_t* const dst, int stride) {
  uint8x8x2_t lo, hi;
  lo.val[0] = vget_low_u8(p0);
  lo.val[1] = vget_low_u8(q0);
  hi.val[0] = vget_high_u8(p0);
  hi.val[1] = vget_high_u8(q0);
  Store2x8_NEON(lo, dst - 1 + 0 * stride, stride);
  Store2x8_NEON(hi, dst - 1 + 8 * stride, stride);
}

#if !defined(WORK_AROUND_GCC)
static WEBP_INLINE void Store4x8_NEON(const uint8x8x4_t v,
                                      uint8_t* const dst, int stride) {
  vst4_lane_u8(dst + 0 * stride, v, 0);
  vst4_lane_u8(dst + 1 * stride, v, 1);
  vst4_lane_u8(dst + 2 * stride, v, 2);
  vst4_lane_u8(dst + 3 * stride, v, 3);
  vst4_lane_u8(dst + 4 * stride, v, 4);
  vst4_lane_u8(dst + 5 * stride, v, 5);
  vst4_lane_u8(dst + 6 * stride, v, 6);
  vst4_lane_u8(dst + 7 * stride, v, 7);
}

static WEBP_INLINE void Store4x16_NEON(const uint8x16_t p1, const uint8x16_t p0,
                                       const uint8x16_t q0, const uint8x16_t q1,
                                       uint8_t* const dst, int stride) {
  uint8x8x4_t lo, hi;
  INIT_VECTOR4(lo,
               vget_low_u8(p1), vget_low_u8(p0),
               vget_low_u8(q0), vget_low_u8(q1));
  INIT_VECTOR4(hi,
               vget_high_u8(p1), vget_high_u8(p0),
               vget_high_u8(q0), vget_high_u8(q1));
  Store4x8_NEON(lo, dst - 2 + 0 * stride, stride);
  Store4x8_NEON(hi, dst - 2 + 8 * stride, stride);
}
#endif  // !WORK_AROUND_GCC

static WEBP_INLINE void Store16x2_NEON(const uint8x16_t p0, const uint8x16_t q0,
                                       uint8_t* const dst, int stride) {
  vst1q_u8(dst - stride, p0);
  vst1q_u8(dst, q0);
}

static WEBP_INLINE void Store16x4_NEON(const uint8x16_t p1, const uint8x16_t p0,
                                       const uint8x16_t q0, const uint8x16_t q1,
                                       uint8_t* const dst, int stride) {
  Store16x2_NEON(p1, p0, dst - stride, stride);
  Store16x2_NEON(q0, q1, dst + stride, stride);
}

static WEBP_INLINE void Store8x2x2_NEON(const uint8x16_t p0,
                                        const uint8x16_t q0,
                                        uint8_t* const u, uint8_t* const v,
                                        int stride) {
  // p0 and q0 contain the u+v samples packed in low/high halves.
  vst1_u8(u - stride, vget_low_u8(p0));
  vst1_u8(u,          vget_low_u8(q0));
  vst1_u8(v - stride, vget_high_u8(p0));
  vst1_u8(v,          vget_high_u8(q0));
}

static WEBP_INLINE void Store8x4x2_NEON(const uint8x16_t p1,
                                        const uint8x16_t p0,
                                        const uint8x16_t q0,
                                        const uint8x16_t q1,
                                        uint8_t* const u, uint8_t* const v,
                                        int stride) {
  // The p1...q1 registers contain the u+v samples packed in low/high halves.
  Store8x2x2_NEON(p1, p0, u - stride, v - stride, stride);
  Store8x2x2_NEON(q0, q1, u + stride, v + stride, stride);
}

#if !defined(WORK_AROUND_GCC)

#define STORE6_LANE(DST, VAL0, VAL1, LANE) do {  \
  vst3_lane_u8((DST) - 3, (VAL0), (LANE));       \
  vst3_lane_u8((DST) + 0, (VAL1), (LANE));       \
  (DST) += stride;                               \
} while (0)

static WEBP_INLINE void Store6x8x2_NEON(
    const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0,
    const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2,
    uint8_t* u, uint8_t* v, int stride) {
  uint8x8x3_t u0, u1, v0, v1;
  INIT_VECTOR3(u0, vget_low_u8(p2), vget_low_u8(p1), vget_low_u8(p0));
  INIT_VECTOR3(u1, vget_low_u8(q0), vget_low_u8(q1), vget_low_u8(q2));
  INIT_VECTOR3(v0, vget_high_u8(p2), vget_high_u8(p1), vget_high_u8(p0));
  INIT_VECTOR3(v1, vget_high_u8(q0), vget_high_u8(q1), vget_high_u8(q2));
  STORE6_LANE(u, u0, u1, 0);
  STORE6_LANE(u, u0, u1, 1);
  STORE6_LANE(u, u0, u1, 2);
  STORE6_LANE(u, u0, u1, 3);
  STORE6_LANE(u, u0, u1, 4);
  STORE6_LANE(u, u0, u1, 5);
  STORE6_LANE(u, u0, u1, 6);
  STORE6_LANE(u, u0, u1, 7);
  STORE6_LANE(v, v0, v1, 0);
  STORE6_LANE(v, v0, v1, 1);
  STORE6_LANE(v, v0, v1, 2);
  STORE6_LANE(v, v0, v1, 3);
  STORE6_LANE(v, v0, v1, 4);
  STORE6_LANE(v, v0, v1, 5);
  STORE6_LANE(v, v0, v1, 6);
  STORE6_LANE(v, v0, v1, 7);
}
#undef STORE6_LANE

static WEBP_INLINE void Store4x8x2_NEON(const uint8x16_t p1,
                                        const uint8x16_t p0,
                                        const uint8x16_t q0,
                                        const uint8x16_t q1,
                                        uint8_t* const u, uint8_t* const v,
                                        int stride) {
  uint8x8x4_t u0, v0;
  INIT_VECTOR4(u0,
               vget_low_u8(p1), vget_low_u8(p0),
               vget_low_u8(q0), vget_low_u8(q1));
  INIT_VECTOR4(v0,
               vget_high_u8(p1), vget_high_u8(p0),
               vget_high_u8(q0), vget_high_u8(q1));
  vst4_lane_u8(u - 2 + 0 * stride, u0, 0);
  vst4_lane_u8(u - 2 + 1 * stride, u0, 1);
  vst4_lane_u8(u - 2 + 2 * stride, u0, 2);
  vst4_lane_u8(u - 2 + 3 * stride, u0, 3);
  vst4_lane_u8(u - 2 + 4 * stride, u0, 4);
  vst4_lane_u8(u - 2 + 5 * stride, u0, 5);
  vst4_lane_u8(u - 2 + 6 * stride, u0, 6);
  vst4_lane_u8(u - 2 + 7 * stride, u0, 7);
  vst4_lane_u8(v - 2 + 0 * stride, v0, 0);
  vst4_lane_u8(v - 2 + 1 * stride, v0, 1);
  vst4_lane_u8(v - 2 + 2 * stride, v0, 2);
  vst4_lane_u8(v - 2 + 3 * stride, v0, 3);
  vst4_lane_u8(v - 2 + 4 * stride, v0, 4);
  vst4_lane_u8(v - 2 + 5 * stride, v0, 5);
  vst4_lane_u8(v - 2 + 6 * stride, v0, 6);
  vst4_lane_u8(v - 2 + 7 * stride, v0, 7);
}

#endif  // !WORK_AROUND_GCC

// Zero extend 'v' to an int16x8_t.
static WEBP_INLINE int16x8_t ConvertU8ToS16_NEON(uint8x8_t v) {
  return vreinterpretq_s16_u16(vmovl_u8(v));
}

// Performs unsigned 8b saturation on 'dst01' and 'dst23' storing the result
// to the corresponding rows of 'dst'.
static WEBP_INLINE void SaturateAndStore4x4_NEON(uint8_t* const dst,
                                                 const int16x8_t dst01,
                                                 const int16x8_t dst23) {
  // Unsigned saturate to 8b.
  const uint8x8_t dst01_u8 = vqmovun_s16(dst01);
  const uint8x8_t dst23_u8 = vqmovun_s16(dst23);

  // Store the results.
  vst1_lane_u32((uint32_t*)(dst + 0 * BPS), vreinterpret_u32_u8(dst01_u8), 0);
  vst1_lane_u32((uint32_t*)(dst + 1 * BPS), vreinterpret_u32_u8(dst01_u8), 1);
  vst1_lane_u32((uint32_t*)(dst + 2 * BPS), vreinterpret_u32_u8(dst23_u8), 0);
  vst1_lane_u32((uint32_t*)(dst + 3 * BPS), vreinterpret_u32_u8(dst23_u8), 1);
}

static WEBP_INLINE void Add4x4_NEON(const int16x8_t row01,
                                    const int16x8_t row23,
                                    uint8_t* const dst) {
  uint32x2_t dst01 = vdup_n_u32(0);
  uint32x2_t dst23 = vdup_n_u32(0);

  // Load the source pixels.
  dst01 = vld1_lane_u32((uint32_t*)(dst + 0 * BPS), dst01, 0);
  dst23 = vld1_lane_u32((uint32_t*)(dst + 2 * BPS), dst23, 0);
  dst01 = vld1_lane_u32((uint32_t*)(dst + 1 * BPS), dst01, 1);
  dst23 = vld1_lane_u32((uint32_t*)(dst + 3 * BPS), dst23, 1);

  {
    // Convert to 16b.
    const int16x8_t dst01_s16 = ConvertU8ToS16_NEON(vreinterpret_u8_u32(dst01));
    const int16x8_t dst23_s16 = ConvertU8ToS16_NEON(vreinterpret_u8_u32(dst23));

    // Descale with rounding.
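    // (vrsraq_n_s16(d, r, 3) computes d + ((r + 4) >> 3), so the accumulate
    // and the rounding descale happen in a single instruction)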
    const int16x8_t out01 = vrsraq_n_s16(dst01_s16, row01, 3);
    const int16x8_t out23 = vrsraq_n_s16(dst23_s16, row23, 3);
    // Saturate and store the result of the inverse transform.
    SaturateAndStore4x4_NEON(dst, out01, out23);
  }
}

//-----------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

static uint8x16_t NeedsFilter_NEON(const uint8x16_t p1, const uint8x16_t p0,
                                   const uint8x16_t q0, const uint8x16_t q1,
                                   int thresh) {
  const uint8x16_t thresh_v = vdupq_n_u8((uint8_t)thresh);
  const uint8x16_t a_p0_q0 = vabdq_u8(p0, q0);               // abs(p0-q0)
  const uint8x16_t a_p1_q1 = vabdq_u8(p1, q1);               // abs(p1-q1)
  const uint8x16_t a_p0_q0_2 = vqaddq_u8(a_p0_q0, a_p0_q0);  // 2 * abs(p0-q0)
  const uint8x16_t a_p1_q1_2 = vshrq_n_u8(a_p1_q1, 1);       // abs(p1-q1) / 2
  const uint8x16_t sum = vqaddq_u8(a_p0_q0_2, a_p1_q1_2);
  const uint8x16_t mask = vcgeq_u8(thresh_v, sum);
  return mask;
}
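
// Note: the saturating adds above clamp the 2*abs(p0-q0) + abs(p1-q1)/2
// metric at 255 instead of letting it wrap, so a strong edge can never
// falsely pass the 'sum <= thresh' test.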

static int8x16_t FlipSign_NEON(const uint8x16_t v) {
  const uint8x16_t sign_bit = vdupq_n_u8(0x80);
  return vreinterpretq_s8_u8(veorq_u8(v, sign_bit));
}

static uint8x16_t FlipSignBack_NEON(const int8x16_t v) {
  const int8x16_t sign_bit = vdupq_n_s8(0x80);
  return vreinterpretq_u8_s8(veorq_s8(v, sign_bit));
}

static int8x16_t GetBaseDelta_NEON(const int8x16_t p1, const int8x16_t p0,
                                   const int8x16_t q0, const int8x16_t q1) {
  const int8x16_t q0_p0 = vqsubq_s8(q0, p0);     // (q0-p0)
  const int8x16_t p1_q1 = vqsubq_s8(p1, q1);     // (p1-q1)
  const int8x16_t s1 = vqaddq_s8(p1_q1, q0_p0);  // (p1-q1) + 1 * (q0 - p0)
  const int8x16_t s2 = vqaddq_s8(q0_p0, s1);     // (p1-q1) + 2 * (q0 - p0)
  const int8x16_t s3 = vqaddq_s8(q0_p0, s2);     // (p1-q1) + 3 * (q0 - p0)
  return s3;
}
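
// Note: FlipSign_NEON()'s XOR with 0x80 maps [0,255] to [-128,127]; the
// saturating s8 adds/subtracts above then provide the clamping that the VP8
// filter equations require on every intermediate value.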

static int8x16_t GetBaseDelta0_NEON(const int8x16_t p0, const int8x16_t q0) {
  const int8x16_t q0_p0 = vqsubq_s8(q0, p0);     // (q0-p0)
  const int8x16_t s1 = vqaddq_s8(q0_p0, q0_p0);  // 2 * (q0 - p0)
  const int8x16_t s2 = vqaddq_s8(q0_p0, s1);     // 3 * (q0 - p0)
  return s2;
}

//------------------------------------------------------------------------------

static void ApplyFilter2NoFlip_NEON(const int8x16_t p0s, const int8x16_t q0s,
                                    const int8x16_t delta,
                                    int8x16_t* const op0,
                                    int8x16_t* const oq0) {
  const int8x16_t kCst3 = vdupq_n_s8(0x03);
  const int8x16_t kCst4 = vdupq_n_s8(0x04);
  const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3);
  const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4);
  const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3);
  const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3);
  *op0 = vqaddq_s8(p0s, delta3);
  *oq0 = vqsubq_s8(q0s, delta4);
}
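
// Note: the +4/+3 biases implement the VP8 rounding split: q0 moves by
// (delta + 4) >> 3 and p0 by (delta + 3) >> 3, which differ by at most one
// (e.g. delta = 4 gives 1 and 0 respectively).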

#if defined(WEBP_USE_INTRINSICS)

static void ApplyFilter2_NEON(const int8x16_t p0s, const int8x16_t q0s,
                              const int8x16_t delta,
                              uint8x16_t* const op0, uint8x16_t* const oq0) {
  const int8x16_t kCst3 = vdupq_n_s8(0x03);
  const int8x16_t kCst4 = vdupq_n_s8(0x04);
  const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3);
  const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4);
  const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3);
  const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3);
  const int8x16_t sp0 = vqaddq_s8(p0s, delta3);
  const int8x16_t sq0 = vqsubq_s8(q0s, delta4);
  *op0 = FlipSignBack_NEON(sp0);
  *oq0 = FlipSignBack_NEON(sq0);
}

static void DoFilter2_NEON(const uint8x16_t p1, const uint8x16_t p0,
                           const uint8x16_t q0, const uint8x16_t q1,
                           const uint8x16_t mask,
                           uint8x16_t* const op0, uint8x16_t* const oq0) {
  const int8x16_t p1s = FlipSign_NEON(p1);
  const int8x16_t p0s = FlipSign_NEON(p0);
  const int8x16_t q0s = FlipSign_NEON(q0);
  const int8x16_t q1s = FlipSign_NEON(q1);
  const int8x16_t delta0 = GetBaseDelta_NEON(p1s, p0s, q0s, q1s);
  const int8x16_t delta1 = vandq_s8(delta0, vreinterpretq_s8_u8(mask));
  ApplyFilter2_NEON(p0s, q0s, delta1, op0, oq0);
}

static void SimpleVFilter16_NEON(uint8_t* p, int stride, int thresh) {
  uint8x16_t p1, p0, q0, q1, op0, oq0;
  Load16x4_NEON(p, stride, &p1, &p0, &q0, &q1);
  {
    const uint8x16_t mask = NeedsFilter_NEON(p1, p0, q0, q1, thresh);
    DoFilter2_NEON(p1, p0, q0, q1, mask, &op0, &oq0);
  }
  Store16x2_NEON(op0, oq0, p, stride);
}

static void SimpleHFilter16_NEON(uint8_t* p, int stride, int thresh) {
  uint8x16_t p1, p0, q0, q1, oq0, op0;
  Load4x16_NEON(p, stride, &p1, &p0, &q0, &q1);
  {
    const uint8x16_t mask = NeedsFilter_NEON(p1, p0, q0, q1, thresh);
    DoFilter2_NEON(p1, p0, q0, q1, mask, &op0, &oq0);
  }
  Store2x16_NEON(op0, oq0, p, stride);
}

#else

// Load/Store vertical edge
#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride)                                \
"vld4.8 {" #c1 "[0]," #c2 "[0]," #c3 "[0]," #c4 "[0]}," #b1 "," #stride "\n"  \
"vld4.8 {" #c1 "[1]," #c2 "[1]," #c3 "[1]," #c4 "[1]}," #b2 "," #stride "\n"  \
"vld4.8 {" #c1 "[2]," #c2 "[2]," #c3 "[2]," #c4 "[2]}," #b1 "," #stride "\n"  \
"vld4.8 {" #c1 "[3]," #c2 "[3]," #c3 "[3]," #c4 "[3]}," #b2 "," #stride "\n"  \
"vld4.8 {" #c1 "[4]," #c2 "[4]," #c3 "[4]," #c4 "[4]}," #b1 "," #stride "\n"  \
"vld4.8 {" #c1 "[5]," #c2 "[5]," #c3 "[5]," #c4 "[5]}," #b2 "," #stride "\n"  \
"vld4.8 {" #c1 "[6]," #c2 "[6]," #c3 "[6]," #c4 "[6]}," #b1 "," #stride "\n"  \
"vld4.8 {" #c1 "[7]," #c2 "[7]," #c3 "[7]," #c4 "[7]}," #b2 "," #stride "\n"

#define STORE8x2(c1, c2, p, stride)                      \
"vst2.8 {" #c1 "[0], " #c2 "[0]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[1], " #c2 "[1]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[2], " #c2 "[2]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[3], " #c2 "[3]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[4], " #c2 "[4]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[5], " #c2 "[5]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[6], " #c2 "[6]}," #p "," #stride " \n"  \
"vst2.8 {" #c1 "[7], " #c2 "[7]}," #p "," #stride " \n"

#define QRegs "q0", "q1", "q2", "q3",  \
              "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"

#define FLIP_SIGN_BIT2(a, b, s)  \
  "veor " #a "," #a "," #s " \n" \
  "veor " #b "," #b "," #s " \n" \

#define FLIP_SIGN_BIT4(a, b, c, d, s)  \
  FLIP_SIGN_BIT2(a, b, s)              \
  FLIP_SIGN_BIT2(c, d, s)              \

#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask)                      \
  "vabd.u8 q15," #p0 "," #q0 " \n"  /* abs(p0 - q0) */                  \
  "vabd.u8 q14," #p1 "," #q1 " \n"  /* abs(p1 - q1) */                  \
  "vqadd.u8 q15, q15, q15 \n"       /* abs(p0 - q0) * 2 */              \
  "vshr.u8 q14, q14, #1 \n"         /* abs(p1 - q1) / 2 */              \
  "vqadd.u8 q15, q15, q14 \n"  /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
  "vdup.8 q14, " #thresh " \n"                                          \
  "vcge.u8 " #mask ", q14, q15 \n"  /* mask = (sum <= thresh) */

#define GET_BASE_DELTA(p1, p0, q0, q1, o)                       \
  "vqsub.s8 q15," #q0 "," #p0 " \n"   /* (q0 - p0) */           \
  "vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */          \
  "vqadd.s8 " #o "," #o ", q15 \n"  /* (p1 - q1) + 1 * (q0 - p0) */ \
  "vqadd.s8 " #o "," #o ", q15 \n"  /* (p1 - q1) + 2 * (q0 - p0) */ \
  "vqadd.s8 " #o "," #o ", q15 \n"  /* (p1 - q1) + 3 * (q0 - p0) */

#define DO_SIMPLE_FILTER(p0, q0, fl)                            \
  "vmov.i8 q15, #0x03 \n"                                       \
  "vqadd.s8 q15, q15, " #fl " \n"   /* filter1 = filter + 3 */  \
  "vshr.s8 q15, q15, #3 \n"         /* filter1 >> 3 */          \
  "vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */        \
                                                                \
  "vmov.i8 q15, #0x04 \n"                                       \
  "vqadd.s8 q15, q15, " #fl " \n"   /* filter2 = filter + 4 */  \
  "vshr.s8 q15, q15, #3 \n"         /* filter2 >> 3 */          \
  "vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */

// Applies filter on 2 pixels (p0 and q0)
#define DO_FILTER2(p1, p0, q0, q1, thresh)                             \
  NEEDS_FILTER(p1, p0, q0, q1, thresh, q9)  /* filter mask in q9 */    \
  "vmov.i8 q10, #0x80 \n"                   /* sign bit */             \
  FLIP_SIGN_BIT4(p1, p0, q0, q1, q10)       /* convert to signed value */ \
  GET_BASE_DELTA(p1, p0, q0, q1, q11)       /* get filter level */     \
  "vand q9, q9, q11 \n"                     /* apply filter mask */    \
  DO_SIMPLE_FILTER(p0, q0, q9)              /* apply filter */         \
  FLIP_SIGN_BIT2(p0, q0, q10)

static void SimpleVFilter16_NEON(uint8_t* p, int stride, int thresh) {
  __asm__ volatile (
    "sub %[p], %[p], %[stride], lsl #1 \n"  // p -= 2 * stride

    "vld1.u8 {q1}, [%[p]], %[stride]   \n"  // p1
    "vld1.u8 {q2}, [%[p]], %[stride]   \n"  // p0
    "vld1.u8 {q3}, [%[p]], %[stride]   \n"  // q0
    "vld1.u8 {q12}, [%[p]]             \n"  // q1

    DO_FILTER2(q1, q2, q3, q12, %[thresh])

    "sub %[p], %[p], %[stride], lsl #1 \n"  // p -= 2 * stride

    "vst1.u8 {q2}, [%[p]], %[stride]   \n"  // store op0
    "vst1.u8 {q3}, [%[p]]              \n"  // store oq0
    : [p] "+r"(p)
    : [stride] "r"(stride), [thresh] "r"(thresh)
    : "memory", QRegs
  );
}

static void SimpleHFilter16_NEON(uint8_t* p, int stride, int thresh) {
  __asm__ volatile (
    "sub r4, %[p], #2                  \n"  // base1 = p - 2
    "lsl r6, %[stride], #1             \n"  // r6 = 2 * stride
    "add r5, r4, %[stride]             \n"  // base2 = base1 + stride

    LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
    LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
    "vswp d3, d24                      \n"  // p1:q1 p0:q3
    "vswp d5, d26                      \n"  // q0:q2 q1:q4
    "vswp q2, q12                      \n"  // p1:q1 p0:q2 q0:q3 q1:q4

    DO_FILTER2(q1, q2, q12, q13, %[thresh])

    "sub %[p], %[p], #1                \n"  // p - 1

    "vswp d5, d24                      \n"
    STORE8x2(d4, d5, [%[p]], %[stride])
    STORE8x2(d24, d25, [%[p]], %[stride])

    : [p] "+r"(p)
    : [stride] "r"(stride), [thresh] "r"(thresh)
    : "memory", "r4", "r5", "r6", QRegs
  );
}

#undef LOAD8x4
#undef STORE8x2

#endif  // WEBP_USE_INTRINSICS

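// Inner-edge variants: a 16x16 luma macroblock has three inner edges, at
// rows (or columns) 4, 8 and 12, hence the 'k = 3' loops below.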
static void SimpleVFilter16i_NEON(uint8_t* p, int stride, int thresh) {
  uint32_t k;
  for (k = 3; k != 0; --k) {
    p += 4 * stride;
    SimpleVFilter16_NEON(p, stride, thresh);
  }
}

static void SimpleHFilter16i_NEON(uint8_t* p, int stride, int thresh) {
  uint32_t k;
  for (k = 3; k != 0; --k) {
    p += 4;
    SimpleHFilter16_NEON(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

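// The complex filter uses two masks: NeedsFilter2_NEON() decides whether a
// pixel gets filtered at all, and NeedsHev_NEON() ("high edge variance")
// routes filtered pixels either to the sharp 2-tap adjustment or to the
// smoother 4/6-tap path.
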
static uint8x16_t NeedsHev_NEON(const uint8x16_t p1, const uint8x16_t p0,
                                const uint8x16_t q0, const uint8x16_t q1,
                                int hev_thresh) {
  const uint8x16_t hev_thresh_v = vdupq_n_u8((uint8_t)hev_thresh);
  const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0);  // abs(p1 - p0)
  const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0);  // abs(q1 - q0)
  const uint8x16_t a_max = vmaxq_u8(a_p1_p0, a_q1_q0);
  const uint8x16_t mask = vcgtq_u8(a_max, hev_thresh_v);
  return mask;
}

static uint8x16_t NeedsFilter2_NEON(const uint8x16_t p3, const uint8x16_t p2,
                                    const uint8x16_t p1, const uint8x16_t p0,
                                    const uint8x16_t q0, const uint8x16_t q1,
                                    const uint8x16_t q2, const uint8x16_t q3,
                                    int ithresh, int thresh) {
  const uint8x16_t ithresh_v = vdupq_n_u8((uint8_t)ithresh);
  const uint8x16_t a_p3_p2 = vabdq_u8(p3, p2);  // abs(p3 - p2)
  const uint8x16_t a_p2_p1 = vabdq_u8(p2, p1);  // abs(p2 - p1)
  const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0);  // abs(p1 - p0)
  const uint8x16_t a_q3_q2 = vabdq_u8(q3, q2);  // abs(q3 - q2)
  const uint8x16_t a_q2_q1 = vabdq_u8(q2, q1);  // abs(q2 - q1)
  const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0);  // abs(q1 - q0)
  const uint8x16_t max1 = vmaxq_u8(a_p3_p2, a_p2_p1);
  const uint8x16_t max2 = vmaxq_u8(a_p1_p0, a_q3_q2);
  const uint8x16_t max3 = vmaxq_u8(a_q2_q1, a_q1_q0);
  const uint8x16_t max12 = vmaxq_u8(max1, max2);
  const uint8x16_t max123 = vmaxq_u8(max12, max3);
  const uint8x16_t mask2 = vcgeq_u8(ithresh_v, max123);
  const uint8x16_t mask1 = NeedsFilter_NEON(p1, p0, q0, q1, thresh);
  const uint8x16_t mask = vandq_u8(mask1, mask2);
  return mask;
}

// 4-points filter

static void ApplyFilter4_NEON(
    const int8x16_t p1, const int8x16_t p0,
    const int8x16_t q0, const int8x16_t q1,
    const int8x16_t delta0,
    uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1) {
  const int8x16_t kCst3 = vdupq_n_s8(0x03);
  const int8x16_t kCst4 = vdupq_n_s8(0x04);
  const int8x16_t delta1 = vqaddq_s8(delta0, kCst4);
  const int8x16_t delta2 = vqaddq_s8(delta0, kCst3);
  const int8x16_t a1 = vshrq_n_s8(delta1, 3);
  const int8x16_t a2 = vshrq_n_s8(delta2, 3);
  const int8x16_t a3 = vrshrq_n_s8(a1, 1);      // a3 = (a1 + 1) >> 1
  *op0 = FlipSignBack_NEON(vqaddq_s8(p0, a2));  // clip(p0 + a2)
  *oq0 = FlipSignBack_NEON(vqsubq_s8(q0, a1));  // clip(q0 - a1)
  *op1 = FlipSignBack_NEON(vqaddq_s8(p1, a3));  // clip(p1 + a3)
  *oq1 = FlipSignBack_NEON(vqsubq_s8(q1, a3));  // clip(q1 - a3)
}

static void DoFilter4_NEON(
    const uint8x16_t p1, const uint8x16_t p0,
    const uint8x16_t q0, const uint8x16_t q1,
    const uint8x16_t mask, const uint8x16_t hev_mask,
    uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1) {
  // This is a fused version of DoFilter2() calling ApplyFilter2 directly
  const int8x16_t p1s = FlipSign_NEON(p1);
  int8x16_t p0s = FlipSign_NEON(p0);
  int8x16_t q0s = FlipSign_NEON(q0);
  const int8x16_t q1s = FlipSign_NEON(q1);
  const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask);

  // do_filter2 part (simple loopfilter on pixels with hev)
  {
    const int8x16_t delta = GetBaseDelta_NEON(p1s, p0s, q0s, q1s);
    const int8x16_t simple_lf_delta =
        vandq_s8(delta, vreinterpretq_s8_u8(simple_lf_mask));
    ApplyFilter2NoFlip_NEON(p0s, q0s, simple_lf_delta, &p0s, &q0s);
  }

  // do_filter4 part (complex loopfilter on pixels without hev)
  {
    const int8x16_t delta0 = GetBaseDelta0_NEON(p0s, q0s);
    // we use: (mask & hev_mask) ^ mask = mask & !hev_mask
    const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);
    const int8x16_t complex_lf_delta =
        vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask));
    ApplyFilter4_NEON(p1s, p0s, q0s, q1s, complex_lf_delta, op1, op0, oq0, oq1);
  }
}

// 6-points filter

static void ApplyFilter6_NEON(
    const int8x16_t p2, const int8x16_t p1, const int8x16_t p0,
    const int8x16_t q0, const int8x16_t q1, const int8x16_t q2,
    const int8x16_t delta,
    uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) {
  // We have to compute: X = (9*a+63) >> 7, Y = (18*a+63) >> 7, Z = (27*a+63) >> 7
  // Turns out, there's a common sub-expression S = 9 * a - 1 that can be used
  // with the special vqrshrn_n_s16 rounding-shift-and-narrow instruction:
  //   X = (S + 64) >> 7, Y = (S + 32) >> 6, Z = (18 * a + S + 64) >> 7
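  // (Y is exact: (S + 32) >> 6 = (9*a + 31) >> 6 = (18*a + 62) >> 7, and since
  // 18*a + 63 is always odd it is never a multiple of 128, so this equals
  // (18*a + 63) >> 7.)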
  const int8x8_t delta_lo = vget_low_s8(delta);
  const int8x8_t delta_hi = vget_high_s8(delta);
  const int8x8_t kCst9 = vdup_n_s8(9);
  const int16x8_t kCstm1 = vdupq_n_s16(-1);
  const int8x8_t kCst18 = vdup_n_s8(18);
  const int16x8_t S_lo = vmlal_s8(kCstm1, kCst9, delta_lo);  // S = 9 * a - 1
  const int16x8_t S_hi = vmlal_s8(kCstm1, kCst9, delta_hi);
  const int16x8_t Z_lo = vmlal_s8(S_lo, kCst18, delta_lo);   // S + 18 * a
  const int16x8_t Z_hi = vmlal_s8(S_hi, kCst18, delta_hi);
  const int8x8_t a3_lo = vqrshrn_n_s16(S_lo, 7);   // (9 * a + 63) >> 7
  const int8x8_t a3_hi = vqrshrn_n_s16(S_hi, 7);
  const int8x8_t a2_lo = vqrshrn_n_s16(S_lo, 6);   // (9 * a + 31) >> 6
  const int8x8_t a2_hi = vqrshrn_n_s16(S_hi, 6);
  const int8x8_t a1_lo = vqrshrn_n_s16(Z_lo, 7);   // (27 * a + 63) >> 7
  const int8x8_t a1_hi = vqrshrn_n_s16(Z_hi, 7);
  const int8x16_t a1 = vcombine_s8(a1_lo, a1_hi);
  const int8x16_t a2 = vcombine_s8(a2_lo, a2_hi);
  const int8x16_t a3 = vcombine_s8(a3_lo, a3_hi);

  *op0 = FlipSignBack_NEON(vqaddq_s8(p0, a1));  // clip(p0 + a1)
  *oq0 = FlipSignBack_NEON(vqsubq_s8(q0, a1));  // clip(q0 - a1)
  *oq1 = FlipSignBack_NEON(vqsubq_s8(q1, a2));  // clip(q1 - a2)
  *op1 = FlipSignBack_NEON(vqaddq_s8(p1, a2));  // clip(p1 + a2)
  *oq2 = FlipSignBack_NEON(vqsubq_s8(q2, a3));  // clip(q2 - a3)
  *op2 = FlipSignBack_NEON(vqaddq_s8(p2, a3));  // clip(p2 + a3)
}

static void DoFilter6_NEON(
    const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0,
    const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2,
    const uint8x16_t mask, const uint8x16_t hev_mask,
    uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0,
    uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) {
  // This is a fused version of DoFilter2() calling ApplyFilter2 directly
  const int8x16_t p2s = FlipSign_NEON(p2);
  const int8x16_t p1s = FlipSign_NEON(p1);
  int8x16_t p0s = FlipSign_NEON(p0);
  int8x16_t q0s = FlipSign_NEON(q0);
  const int8x16_t q1s = FlipSign_NEON(q1);
  const int8x16_t q2s = FlipSign_NEON(q2);
  const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask);
  const int8x16_t delta0 = GetBaseDelta_NEON(p1s, p0s, q0s, q1s);

  // do_filter2 part (simple loopfilter on pixels with hev)
  {
    const int8x16_t simple_lf_delta =
        vandq_s8(delta0, vreinterpretq_s8_u8(simple_lf_mask));
    ApplyFilter2NoFlip_NEON(p0s, q0s, simple_lf_delta, &p0s, &q0s);
  }

  // do_filter6 part (complex loopfilter on pixels without hev)
  {
    // we use: (mask & hev_mask) ^ mask = mask & !hev_mask
    const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);
    const int8x16_t complex_lf_delta =
        vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask));
    ApplyFilter6_NEON(p2s, p1s, p0s, q0s, q1s, q2s, complex_lf_delta,
                      op2, op1, op0, oq0, oq1, oq2);
  }
}

// on macroblock edges

static void VFilter16_NEON(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  Load16x8_NEON(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  {
    const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
                                              ithresh, thresh);
    const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
    uint8x16_t op2, op1, op0, oq0, oq1, oq2;
    DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
                   &op2, &op1, &op0, &oq0, &oq1, &oq2);
    Store16x2_NEON(op2, op1, p - 2 * stride, stride);
    Store16x2_NEON(op0, oq0, p + 0 * stride, stride);
    Store16x2_NEON(oq1, oq2, p + 2 * stride, stride);
  }
}

static void HFilter16_NEON(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  Load8x16_NEON(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  {
    const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
                                              ithresh, thresh);
    const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
    uint8x16_t op2, op1, op0, oq0, oq1, oq2;
    DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
                   &op2, &op1, &op0, &oq0, &oq1, &oq2);
    Store2x16_NEON(op2, op1, p - 2, stride);
    Store2x16_NEON(op0, oq0, p + 0, stride);
    Store2x16_NEON(oq1, oq2, p + 2, stride);
  }
}

// on three inner edges
static void VFilter16i_NEON(uint8_t* p, int stride,
                            int thresh, int ithresh, int hev_thresh) {
  uint32_t k;
  uint8x16_t p3, p2, p1, p0;
  Load16x4_NEON(p + 2 * stride, stride, &p3, &p2, &p1, &p0);
  for (k = 3; k != 0; --k) {
    uint8x16_t q0, q1, q2, q3;
    p += 4 * stride;
    Load16x4_NEON(p + 2 * stride, stride, &q0, &q1, &q2, &q3);
    {
      const uint8x16_t mask =
          NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh);
      const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
      // p3 and p2 are not just temporary variables here: they will be
      // re-used for the next span. And q2/q3 will become p1/p0 accordingly.
      DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2);
      Store16x4_NEON(p1, p0, p3, p2, p, stride);
      p1 = q2;
      p0 = q3;
    }
  }
}

#if !defined(WORK_AROUND_GCC)
static void HFilter16i_NEON(uint8_t* p, int stride,
                            int thresh, int ithresh, int hev_thresh) {
  uint32_t k;
  uint8x16_t p3, p2, p1, p0;
  Load4x16_NEON(p + 2, stride, &p3, &p2, &p1, &p0);
  for (k = 3; k != 0; --k) {
    uint8x16_t q0, q1, q2, q3;
    p += 4;
    Load4x16_NEON(p + 2, stride, &q0, &q1, &q2, &q3);
    {
      const uint8x16_t mask =
          NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh);
      const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
      DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2);
      Store4x16_NEON(p1, p0, p3, p2, p, stride);
      p1 = q2;
      p0 = q3;
    }
  }
}
#endif  // !WORK_AROUND_GCC

// 8-pixel-wide variant, for chroma filtering
static void VFilter8_NEON(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  {
    const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
                                              ithresh, thresh);
    const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
    uint8x16_t op2, op1, op0, oq0, oq1, oq2;
    DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
                   &op2, &op1, &op0, &oq0, &oq1, &oq2);
    Store8x2x2_NEON(op2, op1, u - 2 * stride, v - 2 * stride, stride);
    Store8x2x2_NEON(op0, oq0, u + 0 * stride, v + 0 * stride, stride);
    Store8x2x2_NEON(oq1, oq2, u + 2 * stride, v + 2 * stride, stride);
  }
}
static void VFilter8i_NEON(uint8_t* u, uint8_t* v, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  u += 4 * stride;
  v += 4 * stride;
  Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  {
    const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
                                              ithresh, thresh);
    const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
    uint8x16_t op1, op0, oq0, oq1;
    DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1);
    Store8x4x2_NEON(op1, op0, oq0, oq1, u, v, stride);
  }
}

#if !defined(WORK_AROUND_GCC)
static void HFilter8_NEON(uint8_t* u, uint8_t* v, int stride,
                          int thresh, int ithresh, int hev_thresh) {
  uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  {
    const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
                                              ithresh, thresh);
    const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
    uint8x16_t op2, op1, op0, oq0, oq1, oq2;
    DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
                   &op2, &op1, &op0, &oq0, &oq1, &oq2);
    Store6x8x2_NEON(op2, op1, op0, oq0, oq1, oq2, u, v, stride);
  }
}

static void HFilter8i_NEON(uint8_t* u, uint8_t* v, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  u += 4;
  v += 4;
  Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  {
    const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
                                              ithresh, thresh);
    const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
    uint8x16_t op1, op0, oq0, oq1;
    DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1);
    Store4x8x2_NEON(op1, op0, oq0, oq1, u, v, stride);
  }
}
#endif  // !WORK_AROUND_GCC

//-----------------------------------------------------------------------------
// Inverse transforms (Paragraph 14.4)

// Technically these are unsigned but vqdmulh is only available in signed.
// vqdmulh returns the high half (effectively >> 16) but also doubles the
// value, changing the >> 16 to >> 15 and requiring an additional >> 1.
// We use this to our advantage with kC2. The canonical value is 35468.
// However, the high bit is set, so treating it as signed would give incorrect
// results. We avoid this by down-shifting by 1 here to clear the highest bit.
// Combined with the doubling effect of vqdmulh, we get >> 16.
// This cannot be applied to kC1 because its lowest bit is set; down-shifting
// that constant would lose precision.

// libwebp uses a trick to avoid an extra addition that libvpx does.
// Instead of:
//   temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
// libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the
// same issue with kC1 and vqdmulh that we work around by down-shifting kC2.

static const int16_t kC1 = WEBP_TRANSFORM_AC3_C1;
static const int16_t kC2 =
    WEBP_TRANSFORM_AC3_C2 / 2;  // half of kC2, actually. See comment above.
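
// Worked example of the halving trick above (assuming the canonical VP8
// constants kC2 = 35468 and kC1 = 20091): for x = 1000, the exact product is
// (1000 * 35468) >> 16 = 541. With the stored constant kC2 / 2 = 17734,
// vqdmulh computes (2 * 1000 * 17734) >> 16 = 541 as well, and 17734 now
// fits comfortably below 2^15. kC1 = 20091 is odd, so halving it would drop
// its low bit and lose precision.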

#if defined(WEBP_USE_INTRINSICS)
static WEBP_INLINE void Transpose8x2_NEON(const int16x8_t in0,
                                          const int16x8_t in1,
                                          int16x8x2_t* const out) {
  // a0 a1 a2 a3 | b0 b1 b2 b3   =>   a0 b0 c0 d0 | a1 b1 c1 d1
  // c0 c1 c2 c3 | d0 d1 d2 d3        a2 b2 c2 d2 | a3 b3 c3 d3
  const int16x8x2_t tmp0 = vzipq_s16(in0, in1);   // a0 c0 a1 c1 a2 c2 ...
                                                  // b0 d0 b1 d1 b2 d2 ...
  *out = vzipq_s16(tmp0.val[0], tmp0.val[1]);
}

static WEBP_INLINE void TransformPass_NEON(int16x8x2_t* const rows) {
  // {rows} = in0 | in4
  //          in8 | in12
  // B1 = in4 | in12
  const int16x8_t B1 =
      vcombine_s16(vget_high_s16(rows->val[0]), vget_high_s16(rows->val[1]));
  // C0 = kC1 * in4 | kC1 * in12
  // C1 = kC2 * in4 | kC2 * in12
  const int16x8_t C0 = vsraq_n_s16(B1, vqdmulhq_n_s16(B1, kC1), 1);
  const int16x8_t C1 = vqdmulhq_n_s16(B1, kC2);
  const int16x4_t a = vqadd_s16(vget_low_s16(rows->val[0]),
                                vget_low_s16(rows->val[1]));   // in0 + in8
  const int16x4_t b = vqsub_s16(vget_low_s16(rows->val[0]),
                                vget_low_s16(rows->val[1]));   // in0 - in8
  // c = kC2 * in4 - kC1 * in12
  // d = kC1 * in4 + kC2 * in12
  const int16x4_t c = vqsub_s16(vget_low_s16(C1), vget_high_s16(C0));
  const int16x4_t d = vqadd_s16(vget_low_s16(C0), vget_high_s16(C1));
  const int16x8_t D0 = vcombine_s16(a, b);      // D0 = a | b
  const int16x8_t D1 = vcombine_s16(d, c);      // D1 = d | c
  const int16x8_t E0 = vqaddq_s16(D0, D1);      // a+d | b+c
  const int16x8_t E_tmp = vqsubq_s16(D0, D1);   // a-d | b-c
  const int16x8_t E1 = vcombine_s16(vget_high_s16(E_tmp), vget_low_s16(E_tmp));
  Transpose8x2_NEON(E0, E1, rows);
}

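// Note: TransformPass_NEON applies one 1-D inverse transform and leaves its
// output transposed (see the final Transpose8x2_NEON call above), so calling
// it twice, as TransformOne_NEON does below, yields the full 2-D inverse:
// one pass over each dimension of the 4x4 block.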
static void TransformOne_NEON(const int16_t* in, uint8_t* dst) {
  int16x8x2_t rows;
  INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8));
  TransformPass_NEON(&rows);
  TransformPass_NEON(&rows);
  Add4x4_NEON(rows.val[0], rows.val[1], dst);
}

#else

static void TransformOne_NEON(const int16_t* in, uint8_t* dst) {
  const int kBPS = BPS;
  // kC1, kC2. Padded because vld1.16 loads 8 bytes
  const int16_t constants[4] = { kC1, kC2, 0, 0 };
  /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */
  __asm__ volatile (
    "vld1.16        {q1, q2}, [%[in]]           \n"
    "vld1.16        {d0}, [%[constants]]        \n"

    /* d2: in[0]
     * d3: in[8]
     * d4: in[4]
     * d5: in[12]
     */
    "vswp           d3, d4                      \n"

    /* q8 = {in[4], in[12]} * kC1 * 2 >> 16
     * q9 = {in[4], in[12]} * kC2 >> 16
     */
    "vqdmulh.s16    q8, q2, d0[0]               \n"
    "vqdmulh.s16    q9, q2, d0[1]               \n"

    /* d22 = a = in[0] + in[8]
     * d23 = b = in[0] - in[8]
     */
    "vqadd.s16      d22, d2, d3                 \n"
    "vqsub.s16      d23, d2, d3                 \n"

    /* The multiplication should be x * kC1 >> 16
     * However, with vqdmulh we get x * kC1 * 2 >> 16
     * (multiply, double, return high half)
     * We avoided this in kC2 by pre-shifting the constant.
     * q8 = in[4]/[12] * kC1 >> 16
     */
    "vshr.s16       q8, q8, #1                  \n"

    /* Add {in[4], in[12]} back after the multiplication. This is handled by
     * adding 1 << 16 to kC1 in the libwebp C code.
     */
    "vqadd.s16      q8, q2, q8                  \n"

    /* d20 = c = in[4]*kC2 - in[12]*kC1
     * d21 = d = in[4]*kC1 + in[12]*kC2
     */
    "vqsub.s16      d20, d18, d17               \n"
    "vqadd.s16      d21, d19, d16               \n"

    /* d2 = tmp[0] = a + d
     * d3 = tmp[1] = b + c
     * d4 = tmp[2] = b - c
     * d5 = tmp[3] = a - d
     */
    "vqadd.s16      d2, d22, d21                \n"
    "vqadd.s16      d3, d23, d20                \n"
    "vqsub.s16      d4, d23, d20                \n"
    "vqsub.s16      d5, d22, d21                \n"

    "vzip.16        q1, q2                      \n"
    "vzip.16        q1, q2                      \n"

    "vswp           d3, d4                      \n"

    /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
     * q9 = {tmp[4], tmp[12]} * kC2 >> 16
     */
    "vqdmulh.s16    q8, q2, d0[0]               \n"
    "vqdmulh.s16    q9, q2, d0[1]               \n"

    /* d22 = a = tmp[0] + tmp[8]
     * d23 = b = tmp[0] - tmp[8]
     */
    "vqadd.s16      d22, d2, d3                 \n"
    "vqsub.s16      d23, d2, d3                 \n"

    /* See long-winded explanations prior */
    "vshr.s16       q8, q8, #1                  \n"
    "vqadd.s16      q8, q2, q8                  \n"

    /* d20 = c = tmp[4]*kC2 - tmp[12]*kC1
     * d21 = d = tmp[4]*kC1 + tmp[12]*kC2
     */
    "vqsub.s16      d20, d18, d17               \n"
    "vqadd.s16      d21, d19, d16               \n"

    /* d2 = tmp[0] = a + d
     * d3 = tmp[1] = b + c
     * d4 = tmp[2] = b - c
     * d5 = tmp[3] = a - d
     */
    "vqadd.s16      d2, d22, d21                \n"
    "vqadd.s16      d3, d23, d20                \n"
    "vqsub.s16      d4, d23, d20                \n"
    "vqsub.s16      d5, d22, d21                \n"

    "vld1.32        d6[0], [%[dst]], %[kBPS]    \n"
    "vld1.32        d6[1], [%[dst]], %[kBPS]    \n"
    "vld1.32        d7[0], [%[dst]], %[kBPS]    \n"
    "vld1.32        d7[1], [%[dst]], %[kBPS]    \n"

    "sub        %[dst], %[dst], %[kBPS], lsl #2 \n"

    /* ((val) + 4) >> 3 */
    "vrshr.s16      d2, d2, #3                  \n"
    "vrshr.s16      d3, d3, #3                  \n"
    "vrshr.s16      d4, d4, #3                  \n"
    "vrshr.s16      d5, d5, #3                  \n"

    "vzip.16        q1, q2                      \n"
    "vzip.16        q1, q2                      \n"

    /* Must accumulate before saturating */
    "vmovl.u8       q8, d6                      \n"
    "vmovl.u8       q9, d7                      \n"

    "vqadd.s16      q1, q1, q8                  \n"
    "vqadd.s16      q2, q2, q9                  \n"

    "vqmovun.s16    d0, q1                      \n"
    "vqmovun.s16    d1, q2                      \n"

    "vst1.32        d0[0], [%[dst]], %[kBPS]    \n"
    "vst1.32        d0[1], [%[dst]], %[kBPS]    \n"
    "vst1.32        d1[0], [%[dst]], %[kBPS]    \n"
    "vst1.32        d1[1], [%[dst]]             \n"

    : [in] "+r"(in), [dst] "+r"(dst)               /* modified registers */
    : [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */
    : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11"  /* clobbered */
  );
}

#endif  // WEBP_USE_INTRINSICS

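// VP8Transform works on one or two horizontally adjacent 4x4 blocks: for the
// second block, the coefficients start at in + 16 and the destination is
// shifted 4 pixels to the right.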
static void TransformTwo_NEON(const int16_t* in, uint8_t* dst, int do_two) {
  TransformOne_NEON(in, dst);
  if (do_two) {
    TransformOne_NEON(in + 16, dst + 4);
  }
}

static void TransformDC_NEON(const int16_t* in, uint8_t* dst) {
  const int16x8_t DC = vdupq_n_s16(in[0]);
  Add4x4_NEON(DC, DC, dst);
}

//------------------------------------------------------------------------------

#define STORE_WHT(dst, col, rows) do {                  \
  *dst = vgetq_lane_s32(rows.val[0], col); (dst) += 16; \
  *dst = vgetq_lane_s32(rows.val[1], col); (dst) += 16; \
  *dst = vgetq_lane_s32(rows.val[2], col); (dst) += 16; \
  *dst = vgetq_lane_s32(rows.val[3], col); (dst) += 16; \
} while (0)

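// The inverse WHT reconstructs the DC coefficients of the 16 luma 4x4 blocks
// of a macroblock; STORE_WHT's '(dst) += 16' scatters each value to the head
// of the corresponding block's 16-coefficient array in 'out'.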
static void TransformWHT_NEON(const int16_t* in, int16_t* out) {
  int32x4x4_t tmp;

  {
    // Load the source.
    const int16x4_t in00_03 = vld1_s16(in + 0);
    const int16x4_t in04_07 = vld1_s16(in + 4);
    const int16x4_t in08_11 = vld1_s16(in + 8);
    const int16x4_t in12_15 = vld1_s16(in + 12);
    const int32x4_t a0 = vaddl_s16(in00_03, in12_15);  // in[0..3] + in[12..15]
    const int32x4_t a1 = vaddl_s16(in04_07, in08_11);  // in[4..7] + in[8..11]
    const int32x4_t a2 = vsubl_s16(in04_07, in08_11);  // in[4..7] - in[8..11]
    const int32x4_t a3 = vsubl_s16(in00_03, in12_15);  // in[0..3] - in[12..15]
    tmp.val[0] = vaddq_s32(a0, a1);
    tmp.val[1] = vaddq_s32(a3, a2);
    tmp.val[2] = vsubq_s32(a0, a1);
    tmp.val[3] = vsubq_s32(a3, a2);
    // Arrange the temporary results column-wise.
    tmp = Transpose4x4_NEON(tmp);
  }

  {
    const int32x4_t kCst3 = vdupq_n_s32(3);
    const int32x4_t dc = vaddq_s32(tmp.val[0], kCst3);  // add rounder
    const int32x4_t a0 = vaddq_s32(dc, tmp.val[3]);
    const int32x4_t a1 = vaddq_s32(tmp.val[1], tmp.val[2]);
    const int32x4_t a2 = vsubq_s32(tmp.val[1], tmp.val[2]);
    const int32x4_t a3 = vsubq_s32(dc, tmp.val[3]);

    tmp.val[0] = vaddq_s32(a0, a1);
    tmp.val[1] = vaddq_s32(a3, a2);
    tmp.val[2] = vsubq_s32(a0, a1);
    tmp.val[3] = vsubq_s32(a3, a2);

    // right shift the results by 3.
    tmp.val[0] = vshrq_n_s32(tmp.val[0], 3);
    tmp.val[1] = vshrq_n_s32(tmp.val[1], 3);
    tmp.val[2] = vshrq_n_s32(tmp.val[2], 3);
    tmp.val[3] = vshrq_n_s32(tmp.val[3], 3);

    STORE_WHT(out, 0, tmp);
    STORE_WHT(out, 1, tmp);
    STORE_WHT(out, 2, tmp);
    STORE_WHT(out, 3, tmp);
  }
}

#undef STORE_WHT

//------------------------------------------------------------------------------

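// Specialized transform for blocks where only in[0] (DC), in[1] and in[4] are
// non-zero: the row and column passes then collapse to the closed-form sums
// below, with WEBP_TRANSFORM_AC3_MUL1/MUL2 standing in for the kC1/kC2
// multiplies.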
static void TransformAC3_NEON(const int16_t* in, uint8_t* dst) {
  const int16x4_t A = vld1_dup_s16(in);
  const int16x4_t c4 = vdup_n_s16(WEBP_TRANSFORM_AC3_MUL2(in[4]));
  const int16x4_t d4 = vdup_n_s16(WEBP_TRANSFORM_AC3_MUL1(in[4]));
  const int c1 = WEBP_TRANSFORM_AC3_MUL2(in[1]);
  const int d1 = WEBP_TRANSFORM_AC3_MUL1(in[1]);
  const uint64_t cd = (uint64_t)( d1 & 0xffff) <<  0 |
                      (uint64_t)( c1 & 0xffff) << 16 |
                      (uint64_t)(-c1 & 0xffff) << 32 |
                      (uint64_t)(-d1 & 0xffff) << 48;
  const int16x4_t CD = vcreate_s16(cd);
  const int16x4_t B = vqadd_s16(A, CD);
  const int16x8_t m0_m1 = vcombine_s16(vqadd_s16(B, d4), vqadd_s16(B, c4));
  const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4));
  Add4x4_NEON(m0_m1, m2_m3, dst);
}

//------------------------------------------------------------------------------
// 4x4

static void DC4_NEON(uint8_t* dst) {    // DC
  const uint8x8_t A = vld1_u8(dst - BPS);  // top row
  const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
  const uint16x4_t p1 = vpadd_u16(p0, p0);
  const uint8x8_t L0 = vld1_u8(dst + 0 * BPS - 1);
  const uint8x8_t L1 = vld1_u8(dst + 1 * BPS - 1);
  const uint8x8_t L2 = vld1_u8(dst + 2 * BPS - 1);
  const uint8x8_t L3 = vld1_u8(dst + 3 * BPS - 1);
  const uint16x8_t s0 = vaddl_u8(L0, L1);
  const uint16x8_t s1 = vaddl_u8(L2, L3);
  const uint16x8_t s01 = vaddq_u16(s0, s1);
  const uint16x8_t sum = vaddq_u16(s01, vcombine_u16(p1, p1));
  const uint8x8_t dc0 = vrshrn_n_u16(sum, 3);  // (sum + 4) >> 3
  const uint8x8_t dc = vdup_lane_u8(dc0, 0);
  int i;
  for (i = 0; i < 4; ++i) {
    vst1_lane_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(dc), 0);
  }
}

// TrueMotion (4x4 + 8x8)
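// The per-pixel prediction is clip(L[r] + A[c] - A[-1]); the sums are
// computed in 16-bit and then saturated back to 8-bit on store.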
static WEBP_INLINE void TrueMotion_NEON(uint8_t* dst, int size) {
  const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1);  // top-left pixel 'A[-1]'
  const uint8x8_t T = vld1_u8(dst - BPS);  // top row 'A[0..3]'
  const int16x8_t d = vreinterpretq_s16_u16(vsubl_u8(T, TL));  // A[c] - A[-1]
  int y;
  for (y = 0; y < size; y += 4) {
    // left edge
    const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1));
    const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1));
    const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1));
    const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1));
    const int16x8_t r0 = vaddq_s16(L0, d);  // L[r] + A[c] - A[-1]
    const int16x8_t r1 = vaddq_s16(L1, d);
    const int16x8_t r2 = vaddq_s16(L2, d);
    const int16x8_t r3 = vaddq_s16(L3, d);
    // Saturate and store the result.
    const uint32x2_t r0_u32 = vreinterpret_u32_u8(vqmovun_s16(r0));
    const uint32x2_t r1_u32 = vreinterpret_u32_u8(vqmovun_s16(r1));
    const uint32x2_t r2_u32 = vreinterpret_u32_u8(vqmovun_s16(r2));
    const uint32x2_t r3_u32 = vreinterpret_u32_u8(vqmovun_s16(r3));
    if (size == 4) {
      vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0_u32, 0);
      vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1_u32, 0);
      vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2_u32, 0);
      vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3_u32, 0);
    } else {
      vst1_u32((uint32_t*)(dst + 0 * BPS), r0_u32);
      vst1_u32((uint32_t*)(dst + 1 * BPS), r1_u32);
      vst1_u32((uint32_t*)(dst + 2 * BPS), r2_u32);
      vst1_u32((uint32_t*)(dst + 3 * BPS), r3_u32);
    }
    dst += 4 * BPS;
  }
}

static void TM4_NEON(uint8_t* dst) { TrueMotion_NEON(dst, 4); }

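// A quick parity argument shows that the vhadd/vrhadd pair below matches the
// usual 3-tap smoothing filter (a + 2 * b + c + 2) >> 2: vhadd gives
// (a + c) >> 1 and vrhadd then gives ((a + c) >> 1 + b + 1) >> 1. Adding back
// the parity bit dropped from (a + c) can never carry the even intermediate
// sum past a multiple of 4, so the two expressions agree for all inputs.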
static void VE4_NEON(uint8_t* dst) {    // vertical
  // NB: avoid vld1_u64 here as an alignment hint may be added -> SIGBUS.
  const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(dst - BPS - 1));  // top row
  const uint64x1_t A1 = vshr_n_u64(A0, 8);
  const uint64x1_t A2 = vshr_n_u64(A0, 16);
  const uint8x8_t ABCDEFGH = vreinterpret_u8_u64(A0);
  const uint8x8_t BCDEFGH0 = vreinterpret_u8_u64(A1);
  const uint8x8_t CDEFGH00 = vreinterpret_u8_u64(A2);
  const uint8x8_t b = vhadd_u8(ABCDEFGH, CDEFGH00);
  const uint8x8_t avg = vrhadd_u8(b, BCDEFGH0);
  int i;
  for (i = 0; i < 4; ++i) {
    vst1_lane_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(avg), 0);
  }
}

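// In RD4_NEON below, vcreate_u64 packs the left column into the low bytes of
// a 64-bit lane (little-endian), so after OR-ing in the shifted top row the
// register holds the bytes L K J I X A B C, i.e. the full prediction edge.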
static void RD4_NEON(uint8_t* dst) {   // Down-right
  const uint8x8_t XABCD_u8 = vld1_u8(dst - BPS - 1);
  const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
  const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32);
  const uint32_t I = dst[-1 + 0 * BPS];
  const uint32_t J = dst[-1 + 1 * BPS];
  const uint32_t K = dst[-1 + 2 * BPS];
  const uint32_t L = dst[-1 + 3 * BPS];
  const uint64x1_t LKJI____ =
      vcreate_u64((uint64_t)L | (K << 8) | (J << 16) | (I << 24));
  const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC);
  const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8));
  const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16));
  const uint8_t D = vget_lane_u8(XABCD_u8, 4);
  const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6);
  const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC);
  const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8);
  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0, 0);
  vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1, 0);
  vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2, 0);
  vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3, 0);
}

static void LD4_NEON(uint8_t* dst) {   // Down-left
  // Note: using the same shift trick as in VE4() is slower here.
  const uint8x8_t ABCDEFGH = vld1_u8(dst - BPS + 0);
  const uint8x8_t BCDEFGH0 = vld1_u8(dst - BPS + 1);
  const uint8x8_t CDEFGH00 = vld1_u8(dst - BPS + 2);
  const uint8x8_t CDEFGHH0 = vset_lane_u8(dst[-BPS + 7], CDEFGH00, 6);
  const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGHH0);
  const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r0 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0, 0);
  vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1, 0);
  vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2, 0);
  vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3, 0);
}

//------------------------------------------------------------------------------
// Chroma

static void VE8uv_NEON(uint8_t* dst) {    // vertical
  const uint8x8_t top = vld1_u8(dst - BPS);
  int j;
  for (j = 0; j < 8; ++j) {
    vst1_u8(dst + j * BPS, top);
  }
}

static void HE8uv_NEON(uint8_t* dst) {    // horizontal
  int j;
  for (j = 0; j < 8; ++j) {
    const uint8x8_t left = vld1_dup_u8(dst - 1);
    vst1_u8(dst, left);
    dst += BPS;
  }
}

static WEBP_INLINE void DC8_NEON(uint8_t* dst, int do_top, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_top) {
    const uint8x8_t A = vld1_u8(dst - BPS);  // top row
#if WEBP_AARCH64
    const uint16_t p2 = vaddlv_u8(A);
    sum_top = vdupq_n_u16(p2);
#else
    const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    sum_top = vcombine_u16(p2, p2);
#endif
  }

  if (do_left) {
    const uint8x8_t L0 = vld1_u8(dst + 0 * BPS - 1);
    const uint8x8_t L1 = vld1_u8(dst + 1 * BPS - 1);
    const uint8x8_t L2 = vld1_u8(dst + 2 * BPS - 1);
    const uint8x8_t L3 = vld1_u8(dst + 3 * BPS - 1);
    const uint8x8_t L4 = vld1_u8(dst + 4 * BPS - 1);
    const uint8x8_t L5 = vld1_u8(dst + 5 * BPS - 1);
    const uint8x8_t L6 = vld1_u8(dst + 6 * BPS - 1);
    const uint8x8_t L7 = vld1_u8(dst + 7 * BPS - 1);
    const uint16x8_t s0 = vaddl_u8(L0, L1);
    const uint16x8_t s1 = vaddl_u8(L2, L3);
    const uint16x8_t s2 = vaddl_u8(L4, L5);
    const uint16x8_t s3 = vaddl_u8(L6, L7);
    const uint16x8_t s01 = vaddq_u16(s0, s1);
    const uint16x8_t s23 = vaddq_u16(s2, s3);
    sum_left = vaddq_u16(s01, s23);
  }

  if (do_top && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 4);
  } else if (do_top) {
    dc0 = vrshrn_n_u16(sum_top, 3);
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 3);
  } else {
    dc0 = vdup_n_u8(0x80);
  }

  {
    const uint8x8_t dc = vdup_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 8; ++i) {
      vst1_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(dc));
    }
  }
}

static void DC8uv_NEON(uint8_t* dst) { DC8_NEON(dst, 1, 1); }
static void DC8uvNoTop_NEON(uint8_t* dst) { DC8_NEON(dst, 0, 1); }
static void DC8uvNoLeft_NEON(uint8_t* dst) { DC8_NEON(dst, 1, 0); }
static void DC8uvNoTopLeft_NEON(uint8_t* dst) { DC8_NEON(dst, 0, 0); }

static void TM8uv_NEON(uint8_t* dst) { TrueMotion_NEON(dst, 8); }

//------------------------------------------------------------------------------
// 16x16

static void VE16_NEON(uint8_t* dst) {    // vertical
  const uint8x16_t top = vld1q_u8(dst - BPS);
  int j;
  for (j = 0; j < 16; ++j) {
    vst1q_u8(dst + j * BPS, top);
  }
}

static void HE16_NEON(uint8_t* dst) {    // horizontal
  int j;
  for (j = 0; j < 16; ++j) {
    const uint8x16_t left = vld1q_dup_u8(dst - 1);
    vst1q_u8(dst, left);
    dst += BPS;
  }
}

static WEBP_INLINE void DC16_NEON(uint8_t* dst, int do_top, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_top) {
    const uint8x16_t A = vld1q_u8(dst - BPS);  // top row
#if WEBP_AARCH64
    const uint16_t p3 = vaddlvq_u8(A);
    sum_top = vdupq_n_u16(p3);
#else
    const uint16x8_t p0 = vpaddlq_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    const uint16x4_t p3 = vpadd_u16(p2, p2);
    sum_top = vcombine_u16(p3, p3);
#endif
  }

  if (do_left) {
    int i;
    sum_left = vdupq_n_u16(0);
    for (i = 0; i < 16; i += 8) {
      const uint8x8_t L0 = vld1_u8(dst + (i + 0) * BPS - 1);
      const uint8x8_t L1 = vld1_u8(dst + (i + 1) * BPS - 1);
      const uint8x8_t L2 = vld1_u8(dst + (i + 2) * BPS - 1);
      const uint8x8_t L3 = vld1_u8(dst + (i + 3) * BPS - 1);
      const uint8x8_t L4 = vld1_u8(dst + (i + 4) * BPS - 1);
      const uint8x8_t L5 = vld1_u8(dst + (i + 5) * BPS - 1);
      const uint8x8_t L6 = vld1_u8(dst + (i + 6) * BPS - 1);
      const uint8x8_t L7 = vld1_u8(dst + (i + 7) * BPS - 1);
      const uint16x8_t s0 = vaddl_u8(L0, L1);
      const uint16x8_t s1 = vaddl_u8(L2, L3);
      const uint16x8_t s2 = vaddl_u8(L4, L5);
      const uint16x8_t s3 = vaddl_u8(L6, L7);
      const uint16x8_t s01 = vaddq_u16(s0, s1);
      const uint16x8_t s23 = vaddq_u16(s2, s3);
      const uint16x8_t sum = vaddq_u16(s01, s23);
      sum_left = vaddq_u16(sum_left, sum);
    }
  }

  if (do_top && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 5);
  } else if (do_top) {
    dc0 = vrshrn_n_u16(sum_top, 4);
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 4);
  } else {
    dc0 = vdup_n_u8(0x80);
  }

  {
    const uint8x16_t dc = vdupq_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 16; ++i) {
      vst1q_u8(dst + i * BPS, dc);
    }
  }
}

static void DC16TopLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 1, 1); }
static void DC16NoTop_NEON(uint8_t* dst) { DC16_NEON(dst, 0, 1); }
static void DC16NoLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 1, 0); }
static void DC16NoTopLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 0, 0); }

static void TM16_NEON(uint8_t* dst) {
  const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1);  // top-left pixel 'A[-1]'
  const uint8x16_t T = vld1q_u8(dst - BPS);  // top row 'A[0..15]'
  // A[c] - A[-1]
  const int16x8_t d_lo = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), TL));
  const int16x8_t d_hi = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), TL));
  int y;
  for (y = 0; y < 16; y += 4) {
    // left edge
    const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1));
    const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1));
    const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1));
    const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1));
    const int16x8_t r0_lo = vaddq_s16(L0, d_lo);  // L[r] + A[c] - A[-1]
    const int16x8_t r1_lo = vaddq_s16(L1, d_lo);
    const int16x8_t r2_lo = vaddq_s16(L2, d_lo);
    const int16x8_t r3_lo = vaddq_s16(L3, d_lo);
    const int16x8_t r0_hi = vaddq_s16(L0, d_hi);
    const int16x8_t r1_hi = vaddq_s16(L1, d_hi);
    const int16x8_t r2_hi = vaddq_s16(L2, d_hi);
    const int16x8_t r3_hi = vaddq_s16(L3, d_hi);
    // Saturate and store the result.
    const uint8x16_t row0 = vcombine_u8(vqmovun_s16(r0_lo), vqmovun_s16(r0_hi));
    const uint8x16_t row1 = vcombine_u8(vqmovun_s16(r1_lo), vqmovun_s16(r1_hi));
    const uint8x16_t row2 = vcombine_u8(vqmovun_s16(r2_lo), vqmovun_s16(r2_hi));
    const uint8x16_t row3 = vcombine_u8(vqmovun_s16(r3_lo), vqmovun_s16(r3_hi));
    vst1q_u8(dst + 0 * BPS, row0);
    vst1q_u8(dst + 1 * BPS, row1);
    vst1q_u8(dst + 2 * BPS, row2);
    vst1q_u8(dst + 3 * BPS, row3);
    dst += 4 * BPS;
  }
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitNEON(void) {
  VP8Transform = TransformTwo_NEON;
  VP8TransformAC3 = TransformAC3_NEON;
  VP8TransformDC = TransformDC_NEON;
  VP8TransformWHT = TransformWHT_NEON;

  VP8VFilter16 = VFilter16_NEON;
  VP8VFilter16i = VFilter16i_NEON;
  VP8HFilter16 = HFilter16_NEON;
#if !defined(WORK_AROUND_GCC)
  VP8HFilter16i = HFilter16i_NEON;
#endif
  VP8VFilter8 = VFilter8_NEON;
  VP8VFilter8i = VFilter8i_NEON;
#if !defined(WORK_AROUND_GCC)
  VP8HFilter8 = HFilter8_NEON;
  VP8HFilter8i = HFilter8i_NEON;
#endif
  VP8SimpleVFilter16 = SimpleVFilter16_NEON;
  VP8SimpleHFilter16 = SimpleHFilter16_NEON;
  VP8SimpleVFilter16i = SimpleVFilter16i_NEON;
  VP8SimpleHFilter16i = SimpleHFilter16i_NEON;

  VP8PredLuma4[0] = DC4_NEON;
  VP8PredLuma4[1] = TM4_NEON;
  VP8PredLuma4[2] = VE4_NEON;
  VP8PredLuma4[4] = RD4_NEON;
  VP8PredLuma4[6] = LD4_NEON;

  VP8PredLuma16[0] = DC16TopLeft_NEON;
  VP8PredLuma16[1] = TM16_NEON;
  VP8PredLuma16[2] = VE16_NEON;
  VP8PredLuma16[3] = HE16_NEON;
  VP8PredLuma16[4] = DC16NoTop_NEON;
  VP8PredLuma16[5] = DC16NoLeft_NEON;
  VP8PredLuma16[6] = DC16NoTopLeft_NEON;

  VP8PredChroma8[0] = DC8uv_NEON;
  VP8PredChroma8[1] = TM8uv_NEON;
  VP8PredChroma8[2] = VE8uv_NEON;
  VP8PredChroma8[3] = HE8uv_NEON;
  VP8PredChroma8[4] = DC8uvNoTop_NEON;
  VP8PredChroma8[5] = DC8uvNoLeft_NEON;
  VP8PredChroma8[6] = DC8uvNoTopLeft_NEON;
}

#else  // !WEBP_USE_NEON

WEBP_DSP_INIT_STUB(VP8DspInitNEON)

#endif  // WEBP_USE_NEON