/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_ARM_BLEND_NEON_H_
#define AOM_AOM_DSP_ARM_BLEND_NEON_H_

#include <arm_neon.h>

#include "config/aom_config.h"

#include "aom_dsp/blend.h"

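// Alpha blend helpers: each lane computes AOM_BLEND_A64(m, a, b), i.e.
// (m * a + (AOM_BLEND_A64_MAX_ALPHA - m) * b + 32) >> AOM_BLEND_A64_ROUND_BITS
// with the mask m in [0, AOM_BLEND_A64_MAX_ALPHA] (= 64).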
static inline uint8x16_t alpha_blend_a64_u8x16(uint8x16_t m, uint8x16_t a,
                                               uint8x16_t b) {
  const uint8x16_t m_inv = vsubq_u8(vdupq_n_u8(AOM_BLEND_A64_MAX_ALPHA), m);

  uint16x8_t blend_u16_lo = vmull_u8(vget_low_u8(m), vget_low_u8(a));
  uint16x8_t blend_u16_hi = vmull_u8(vget_high_u8(m), vget_high_u8(a));

  blend_u16_lo = vmlal_u8(blend_u16_lo, vget_low_u8(m_inv), vget_low_u8(b));
  blend_u16_hi = vmlal_u8(blend_u16_hi, vget_high_u8(m_inv), vget_high_u8(b));

  uint8x8_t blend_u8_lo = vrshrn_n_u16(blend_u16_lo, AOM_BLEND_A64_ROUND_BITS);
  uint8x8_t blend_u8_hi = vrshrn_n_u16(blend_u16_hi, AOM_BLEND_A64_ROUND_BITS);

  return vcombine_u8(blend_u8_lo, blend_u8_hi);
}

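// As above, blending 8 lanes of 8-bit pixels.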
static inline uint8x8_t alpha_blend_a64_u8x8(uint8x8_t m, uint8x8_t a,
                                             uint8x8_t b) {
  const uint8x8_t m_inv = vsub_u8(vdup_n_u8(AOM_BLEND_A64_MAX_ALPHA), m);

  uint16x8_t blend_u16 = vmull_u8(m, a);

  blend_u16 = vmlal_u8(blend_u16, m_inv, b);

  return vrshrn_n_u16(blend_u16, AOM_BLEND_A64_ROUND_BITS);
}

#if CONFIG_AV1_HIGHBITDEPTH
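// High bitdepth variants: 16-bit pixel lanes with 32-bit intermediates.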
static inline uint16x8_t alpha_blend_a64_u16x8(uint16x8_t m, uint16x8_t a,
                                               uint16x8_t b) {
  const uint16x8_t m_inv = vsubq_u16(vdupq_n_u16(AOM_BLEND_A64_MAX_ALPHA), m);

  uint32x4_t blend_u32_lo = vmull_u16(vget_low_u16(a), vget_low_u16(m));
  uint32x4_t blend_u32_hi = vmull_u16(vget_high_u16(a), vget_high_u16(m));

  blend_u32_lo = vmlal_u16(blend_u32_lo, vget_low_u16(b), vget_low_u16(m_inv));
  blend_u32_hi =
      vmlal_u16(blend_u32_hi, vget_high_u16(b), vget_high_u16(m_inv));

  uint16x4_t blend_u16_lo =
      vrshrn_n_u32(blend_u32_lo, AOM_BLEND_A64_ROUND_BITS);
  uint16x4_t blend_u16_hi =
      vrshrn_n_u32(blend_u32_hi, AOM_BLEND_A64_ROUND_BITS);

  return vcombine_u16(blend_u16_lo, blend_u16_hi);
}

static inline uint16x4_t alpha_blend_a64_u16x4(uint16x4_t m, uint16x4_t a,
                                               uint16x4_t b) {
  const uint16x4_t m_inv = vsub_u16(vdup_n_u16(AOM_BLEND_A64_MAX_ALPHA), m);

  uint32x4_t blend_u32 = vmull_u16(m, a);

  blend_u32 = vmlal_u16(blend_u32, m_inv, b);

  return vrshrn_n_u32(blend_u32, AOM_BLEND_A64_ROUND_BITS);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

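// Average two vectors with rounding: each lane computes (a + b + 1) >> 1.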
static inline uint8x8_t avg_blend_u8x8(uint8x8_t a, uint8x8_t b) {
  return vrhadd_u8(a, b);
}

static inline uint8x16_t avg_blend_u8x16(uint8x16_t a, uint8x16_t b) {
  return vrhaddq_u8(a, b);
}

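// Average adjacent pairs of lanes with rounding: the low half of the result
// holds (a[2i] + a[2i + 1] + 1) >> 1 and the high half the same for b. The
// pairwise sums are assumed not to overflow 8 bits.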
static inline uint8x8_t avg_blend_pairwise_u8x8(uint8x8_t a, uint8x8_t b) {
  return vrshr_n_u8(vpadd_u8(a, b), 1);
}

static inline uint8x16_t avg_blend_pairwise_u8x16(uint8x16_t a, uint8x16_t b) {
#if AOM_ARCH_AARCH64
  return vrshrq_n_u8(vpaddq_u8(a, b), 1);
#else
  uint8x8_t sum_pairwise_a = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
  uint8x8_t sum_pairwise_b = vpadd_u8(vget_low_u8(b), vget_high_u8(b));
  return vrshrq_n_u8(vcombine_u8(sum_pairwise_a, sum_pairwise_b), 1);
#endif  // AOM_ARCH_AARCH64
}

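// Average adjacent pairs across two sources: the low half of the result holds
// (a[2i] + a[2i + 1] + b[2i] + b[2i + 1] + 2) >> 2 and the high half the same
// for c and d. The intermediate pairwise additions are assumed not to
// overflow 8 bits; the final addition saturates.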
static inline uint8x8_t avg_blend_pairwise_u8x8_4(uint8x8_t a, uint8x8_t b,
                                                  uint8x8_t c, uint8x8_t d) {
  uint8x8_t a_c = vpadd_u8(a, c);
  uint8x8_t b_d = vpadd_u8(b, d);
  return vrshr_n_u8(vqadd_u8(a_c, b_d), 2);
}

static inline uint8x16_t avg_blend_pairwise_u8x16_4(uint8x16_t a, uint8x16_t b,
                                                    uint8x16_t c,
                                                    uint8x16_t d) {
#if AOM_ARCH_AARCH64
  uint8x16_t a_c = vpaddq_u8(a, c);
  uint8x16_t b_d = vpaddq_u8(b, d);
  return vrshrq_n_u8(vqaddq_u8(a_c, b_d), 2);
#else
  uint8x8_t sum_pairwise_a = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
  uint8x8_t sum_pairwise_b = vpadd_u8(vget_low_u8(b), vget_high_u8(b));
  uint8x8_t sum_pairwise_c = vpadd_u8(vget_low_u8(c), vget_high_u8(c));
  uint8x8_t sum_pairwise_d = vpadd_u8(vget_low_u8(d), vget_high_u8(d));
  uint8x16_t a_c = vcombine_u8(sum_pairwise_a, sum_pairwise_c);
  uint8x16_t b_d = vcombine_u8(sum_pairwise_b, sum_pairwise_d);
  return vrshrq_n_u8(vqaddq_u8(a_c, b_d), 2);
#endif  // AOM_ARCH_AARCH64
}

#endif  // AOM_AOM_DSP_ARM_BLEND_NEON_H_