// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
//
// Author: [email protected] (Mans Rullgard)
// Based on SSE code by: [email protected] (Somnath Banerjee)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_NEON)

#include <assert.h>
#include <arm_neon.h>
#include <string.h>
#include "src/dsp/neon.h"
#include "src/dsp/yuv.h"

#ifdef FANCY_UPSAMPLING

//-----------------------------------------------------------------------------
// U/V upsampling

// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
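//
// For each pair of adjacent samples a = r1[i], b = r1[i + 1] (top row) and
// c = r2[i], d = r2[i + 1] (bottom row), four outputs are produced with the
// fancy upsampler's bilinear (9, 3, 3, 1) / 16 weighting, computed (up to
// rounding) in two steps:
//   diag1 = (a + 3b + 3c + d) >> 3    diag2 = (3a + b + c + 3d) >> 3
//   A = (a + diag1 + 1) >> 1  ~=  (9a + 3b + 3c + d + 8) >> 4    (etc.)
// The top-row outputs (A, B) are stored interleaved at out[0..15] and the
// bottom-row outputs (C, D) at out[32..47]; the 16-byte gap at out[16..31] is
// used by the caller for the other chroma plane (see the r_uv layout below).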
#define UPSAMPLE_16PIXELS(r1, r2, out) do {                              \
  const uint8x8_t a = vld1_u8(r1 + 0);                                   \
  const uint8x8_t b = vld1_u8(r1 + 1);                                   \
  const uint8x8_t c = vld1_u8(r2 + 0);                                   \
  const uint8x8_t d = vld1_u8(r2 + 1);                                   \
  /* a + b + c + d */                                                    \
  const uint16x8_t ad = vaddl_u8(a, d);                                  \
  const uint16x8_t bc = vaddl_u8(b, c);                                  \
  const uint16x8_t abcd = vaddq_u16(ad, bc);                             \
  /* 3a + b + c + 3d */                                                  \
  const uint16x8_t al = vaddq_u16(abcd, vshlq_n_u16(ad, 1));             \
  /* a + 3b + 3c + d */                                                  \
  const uint16x8_t bl = vaddq_u16(abcd, vshlq_n_u16(bc, 1));             \
                                                                         \
  const uint8x8_t diag2 = vshrn_n_u16(al, 3);                            \
  const uint8x8_t diag1 = vshrn_n_u16(bl, 3);                            \
                                                                         \
  const uint8x8_t A = vrhadd_u8(a, diag1);                               \
  const uint8x8_t B = vrhadd_u8(b, diag2);                               \
  const uint8x8_t C = vrhadd_u8(c, diag2);                               \
  const uint8x8_t D = vrhadd_u8(d, diag1);                               \
                                                                         \
  uint8x8x2_t A_B, C_D;                                                  \
  INIT_VECTOR2(A_B, A, B);                                               \
  INIT_VECTOR2(C_D, C, D);                                               \
  vst2_u8(out + 0, A_B);                                                 \
  vst2_u8(out + 32, C_D);                                                \
} while (0)

// Turn the macro into a function to reduce code size where speed is not
// critical.
static void Upsample16Pixels_NEON(const uint8_t* r1, const uint8_t* r2,
                                  uint8_t* out) {
  UPSAMPLE_16PIXELS(r1, r2, out);
}

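// Handles the right edge, where fewer than 9 readable samples remain per row:
// the (num_pixels) valid bytes are copied into 9-byte stack buffers and the
// last byte is replicated, so the full 16-pixel kernel can still be applied.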
#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) {                   \
  uint8_t r1[9], r2[9];                                                  \
  memcpy(r1, (tb), (num_pixels));                                        \
  memcpy(r2, (bb), (num_pixels));                                        \
  /* replicate last byte */                                              \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels));     \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels));     \
  Upsample16Pixels_NEON(r1, r2, out);                                    \
}

//-----------------------------------------------------------------------------
// YUV->RGB conversion

// Note: the large constant 33050 is represented as 32768 + 282, since it does
// not fit in a signed 16-bit lane (see the B channel in CONVERT8 below).
static const int16_t kCoeffs1[4] = { 19077, 26149, 6419, 13320 };
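
// Fixed-point scheme used by CONVERT8 below. The 8-bit Y/U/V samples are
// widened and shifted left by 7 (vshll_n_u8(., 7)), and
// vqdmulhq_lane_s16(x, coeff1, k) computes (2 * x * coeff) >> 16, i.e.
// (sample * coeff) >> 8 when x = sample << 7. The per-channel rounders
// (-14234, 8708, -17685) are added at that precision, and the final
// vqshrun_n_s16(., YUV_FIX2) applies the remaining >> 6 with unsigned
// saturation to [0, 255]. The 32768 part of the 33050 constant contributes
// exactly U0, since (2 * U0 * 32768) >> 16 == U0, which is why the B channel
// adds U0 back in separately.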

#define v255 vdup_n_u8(255)

#define STORE_Rgb(out, r, g, b) do {                                     \
  uint8x8x3_t r_g_b;                                                     \
  INIT_VECTOR3(r_g_b, r, g, b);                                          \
  vst3_u8(out, r_g_b);                                                   \
} while (0)

#define STORE_Bgr(out, r, g, b) do {                                     \
  uint8x8x3_t b_g_r;                                                     \
  INIT_VECTOR3(b_g_r, b, g, r);                                          \
  vst3_u8(out, b_g_r);                                                   \
} while (0)

#define STORE_Rgba(out, r, g, b) do {                                    \
  uint8x8x4_t r_g_b_v255;                                                \
  INIT_VECTOR4(r_g_b_v255, r, g, b, v255);                               \
  vst4_u8(out, r_g_b_v255);                                              \
} while (0)

#define STORE_Bgra(out, r, g, b) do {                                    \
  uint8x8x4_t b_g_r_v255;                                                \
  INIT_VECTOR4(b_g_r_v255, b, g, r, v255);                               \
  vst4_u8(out, b_g_r_v255);                                              \
} while (0)

#define STORE_Argb(out, r, g, b) do {                                    \
  uint8x8x4_t v255_r_g_b;                                                \
  INIT_VECTOR4(v255_r_g_b, v255, r, g, b);                               \
  vst4_u8(out, v255_r_g_b);                                              \
} while (0)

#if (WEBP_SWAP_16BIT_CSP == 0)
#define ZIP_U8(lo, hi) vzip_u8((lo), (hi))
#else
#define ZIP_U8(lo, hi) vzip_u8((hi), (lo))
#endif

#define STORE_Rgba4444(out, r, g, b) do {                                \
  const uint8x8_t rg = vsri_n_u8(r, g, 4);      /* shift g, insert r */  \
  const uint8x8_t ba = vsri_n_u8(b, v255, 4);   /* shift a, insert b */  \
  const uint8x8x2_t rgba4444 = ZIP_U8(rg, ba);                           \
  vst1q_u8(out, vcombine_u8(rgba4444.val[0], rgba4444.val[1]));          \
} while (0)

#define STORE_Rgb565(out, r, g, b) do {                                  \
  const uint8x8_t rg = vsri_n_u8(r, g, 5);   /* shift g and insert r */  \
  const uint8x8_t g1 = vshl_n_u8(g, 3);      /* pre-shift g: 3bits */    \
  const uint8x8_t gb = vsri_n_u8(g1, b, 3);  /* shift b and insert g */  \
  const uint8x8x2_t rgb565 = ZIP_U8(rg, gb);                             \
  vst1q_u8(out, vcombine_u8(rgb565.val[0], rgb565.val[1]));              \
} while (0)
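
// Byte layout of the packed 16-bit formats after the ZIP_U8() interleave:
// RGBA4444 stores the byte pair (RRRRGGGG, BBBBAAAA) per pixel and RGB565 the
// pair (RRRRRGGG, GGGBBBBB); WEBP_SWAP_16BIT_CSP swaps the two bytes.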
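// Converts N pixels (N a multiple of 8) of one luma row, 8 at a time.
// src_uv points into the cached upsampled chroma for that row, with U at
// offset 0 and V at offset 16; cur_x is the x position of the first pixel,
// used both to index src_y and to compute the byte offset into the output
// (XSTEP bytes per pixel).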
#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) do {          \
  int i;                                                                 \
  for (i = 0; i < N; i += 8) {                                           \
    const int off = ((cur_x) + i) * XSTEP;                               \
    const uint8x8_t y = vld1_u8((src_y) + (cur_x) + i);                  \
    const uint8x8_t u = vld1_u8((src_uv) + i + 0);                       \
    const uint8x8_t v = vld1_u8((src_uv) + i + 16);                      \
    const int16x8_t Y0 = vreinterpretq_s16_u16(vshll_n_u8(y, 7));        \
    const int16x8_t U0 = vreinterpretq_s16_u16(vshll_n_u8(u, 7));        \
    const int16x8_t V0 = vreinterpretq_s16_u16(vshll_n_u8(v, 7));        \
    const int16x8_t Y1 = vqdmulhq_lane_s16(Y0, coeff1, 0);               \
    const int16x8_t R0 = vqdmulhq_lane_s16(V0, coeff1, 1);               \
    const int16x8_t G0 = vqdmulhq_lane_s16(U0, coeff1, 2);               \
    const int16x8_t G1 = vqdmulhq_lane_s16(V0, coeff1, 3);               \
    const int16x8_t B0 = vqdmulhq_n_s16(U0, 282);                        \
    const int16x8_t R1 = vqaddq_s16(Y1, R_Rounder);                      \
    const int16x8_t G2 = vqaddq_s16(Y1, G_Rounder);                      \
    const int16x8_t B1 = vqaddq_s16(Y1, B_Rounder);                      \
    const int16x8_t R2 = vqaddq_s16(R0, R1);                             \
    const int16x8_t G3 = vqaddq_s16(G0, G1);                             \
    const int16x8_t B2 = vqaddq_s16(B0, B1);                             \
    const int16x8_t G4 = vqsubq_s16(G2, G3);                             \
    const int16x8_t B3 = vqaddq_s16(B2, U0);                             \
    const uint8x8_t R = vqshrun_n_s16(R2, YUV_FIX2);                     \
    const uint8x8_t G = vqshrun_n_s16(G4, YUV_FIX2);                     \
    const uint8x8_t B = vqshrun_n_s16(B3, YUV_FIX2);                     \
    STORE_ ## FMT(out + off, R, G, B);                                   \
  }                                                                      \
} while (0)

#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) {            \
  int i;                                                                 \
  for (i = 0; i < N; i++) {                                              \
    const int off = ((cur_x) + i) * XSTEP;                               \
    const int y = src_y[(cur_x) + i];                                    \
    const int u = (src_uv)[i];                                           \
    const int v = (src_uv)[i + 16];                                      \
    FUNC(y, u, v, rgb + off);                                            \
  }                                                                      \
}

#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv,                   \
                      top_dst, bottom_dst, cur_x, len) {                 \
  CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x);                  \
  if (bottom_y != NULL) {                                                \
    CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x);   \
  }                                                                      \
}

#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv,                  \
                      top_dst, bottom_dst, cur_x, len) {                 \
  CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x);                 \
  if (bottom_y != NULL) {                                                \
    CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x);  \
  }                                                                      \
}

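// One call converts a pair of luma rows (top_y, bottom_y) sharing the chroma
// rows (top_u/top_v, cur_u/cur_v). Each loop iteration upsamples 8 U and 8 V
// samples into 16 of each, cached in the 16-byte-aligned r_uv buffer:
//   r_uv[ 0..15] U (top)     r_uv[16..31] V (top)
//   r_uv[32..47] U (bottom)  r_uv[48..63] V (bottom)
// and then converts 16 pixels of each luma row. Since the kernel reads 9
// chroma samples per row, only (uv_len - 1) / 8 full blocks are processed;
// the remaining columns go through UPSAMPLE_LAST_BLOCK and the scalar
// CONVERT2RGB_1 path. The very first output pixel has no left neighbor, so it
// is handled separately using the diagonal averages u_diag / v_diag.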
#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP)                        \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y,    \
                      const uint8_t* top_u, const uint8_t* top_v,       \
                      const uint8_t* cur_u, const uint8_t* cur_v,       \
                      uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
  int block;                                                             \
  /* 16-byte-aligned array to cache reconstructed u and v */             \
  uint8_t uv_buf[2 * 32 + 15];                                           \
  uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15);      \
  const int uv_len = (len + 1) >> 1;                                     \
  /* 9 pixels must be readable for each block */                         \
  const int num_blocks = (uv_len - 1) >> 3;                              \
  const int leftover = uv_len - num_blocks * 8;                          \
  const int last_pos = 1 + 16 * num_blocks;                              \
                                                                         \
  const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1;                   \
  const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1;                   \
                                                                         \
  const int16x4_t coeff1 = vld1_s16(kCoeffs1);                           \
  const int16x8_t R_Rounder = vdupq_n_s16(-14234);                       \
  const int16x8_t G_Rounder = vdupq_n_s16(8708);                         \
  const int16x8_t B_Rounder = vdupq_n_s16(-17685);                       \
                                                                         \
  /* Treat the first pixel in the regular (scalar) way. */               \
  assert(top_y != NULL);                                                 \
  {                                                                      \
    const int u0 = (top_u[0] + u_diag) >> 1;                             \
    const int v0 = (top_v[0] + v_diag) >> 1;                             \
    VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst);                          \
  }                                                                      \
  if (bottom_y != NULL) {                                                \
    const int u0 = (cur_u[0] + u_diag) >> 1;                             \
    const int v0 = (cur_v[0] + v_diag) >> 1;                             \
    VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst);                    \
  }                                                                      \
                                                                         \
  for (block = 0; block < num_blocks; ++block) {                         \
    UPSAMPLE_16PIXELS(top_u, cur_u, r_uv);                               \
    UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16);                          \
    CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv,                     \
                  top_dst, bottom_dst, 16 * block + 1, 16);              \
    top_u += 8;                                                          \
    cur_u += 8;                                                          \
    top_v += 8;                                                          \
    cur_v += 8;                                                          \
  }                                                                      \
                                                                         \
  UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv);                     \
  UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16);                \
  CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv,           \
                top_dst, bottom_dst, last_pos, len - last_pos);          \
}

// NEON variants of the fancy upsampler.
NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePair_NEON, Rgba, 4)
NEON_UPSAMPLE_FUNC(UpsampleBgraLinePair_NEON, Bgra, 4)
#if !defined(WEBP_REDUCE_CSP)
NEON_UPSAMPLE_FUNC(UpsampleRgbLinePair_NEON, Rgb, 3)
NEON_UPSAMPLE_FUNC(UpsampleBgrLinePair_NEON, Bgr, 3)
NEON_UPSAMPLE_FUNC(UpsampleArgbLinePair_NEON, Argb, 4)
NEON_UPSAMPLE_FUNC(UpsampleRgba4444LinePair_NEON, Rgba4444, 2)
NEON_UPSAMPLE_FUNC(UpsampleRgb565LinePair_NEON, Rgb565, 2)
#endif  // WEBP_REDUCE_CSP

//-----------------------------------------------------------------------------
// Entry point

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

extern void WebPInitUpsamplersNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersNEON(void) {
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair_NEON;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair_NEON;
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair_NEON;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair_NEON;
#if !defined(WEBP_REDUCE_CSP)
  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_NEON;
  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_NEON;
  WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair_NEON;
  WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair_NEON;
  WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair_NEON;
  WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair_NEON;
  WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair_NEON;
#endif  // WEBP_REDUCE_CSP
}

#endif  // FANCY_UPSAMPLING

#endif  // WEBP_USE_NEON

#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_NEON))
WEBP_DSP_INIT_STUB(WebPInitUpsamplersNEON)
#endif