/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"

#include "aom_dsp/arm/blend_neon.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/blend.h"

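// Alpha blend two high-bitdepth images using a vertical mask: each mask value
// in [0, 64] applies to one whole row of the block, computing per pixel
// dst = (mask * src0 + (64 - mask) * src1 + 32) >> 6, matching
// aom_highbd_blend_a64_vmask_c.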
void aom_highbd_blend_a64_vmask_neon(uint8_t *dst_8, uint32_t dst_stride,
                                     const uint8_t *src0_8,
                                     uint32_t src0_stride,
                                     const uint8_t *src1_8,
                                     uint32_t src1_stride, const uint8_t *mask,
                                     int w, int h, int bd) {
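  // bd is only consulted by the bitdepth assert below; the blend arithmetic
  // itself is bitdepth-agnostic, so suppress the unused-parameter warning for
  // release (NDEBUG) builds where asserts are compiled out.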
  (void)bd;

  const uint16_t *src0 = CONVERT_TO_SHORTPTR(src0_8);
  const uint16_t *src1 = CONVERT_TO_SHORTPTR(src1_8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);

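  // In-place blending (dst aliasing either source) is only valid when the
  // aliased source shares the destination's stride.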
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 1);
  assert(w >= 1);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  assert(bd == 8 || bd == 10 || bd == 12);

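  // Dispatch on block width. Rows of eight or more pixels are processed in
  // 8-wide chunks, one row per outer iteration.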
  if (w >= 8) {
    do {
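      // A single mask value covers the whole row: broadcast it across all
      // lanes and widen to 16 bits to match the high-bitdepth pixel type.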
      uint16x8_t m = vmovl_u8(vdup_n_u8(mask[0]));
      int i = 0;
      do {
        uint16x8_t s0 = vld1q_u16(src0 + i);
        uint16x8_t s1 = vld1q_u16(src1 + i);

        uint16x8_t blend = alpha_blend_a64_u16x8(m, s0, s1);

        vst1q_u16(dst + i, blend);
        i += 8;
      } while (i < w);

      mask += 1;
      src0 += src0_stride;
      src1 += src1_stride;
      dst += dst_stride;
    } while (--h != 0);
  } else if (w == 4) {
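    // Two 4-pixel rows fit in one uint16x8_t, so process two rows per
    // iteration with each row's mask value duplicated across its half of the
    // vector.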
    do {
      uint16x4_t m1 = vdup_n_u16((uint16_t)mask[0]);
      uint16x4_t m2 = vdup_n_u16((uint16_t)mask[1]);
      uint16x8_t m = vcombine_u16(m1, m2);
      uint16x8_t s0 = load_unaligned_u16_4x2(src0, src0_stride);
      uint16x8_t s1 = load_unaligned_u16_4x2(src1, src1_stride);

      uint16x8_t blend = alpha_blend_a64_u16x8(m, s0, s1);

      store_u16x4_strided_x2(dst, dst_stride, blend);

      mask += 2;
      src0 += 2 * src0_stride;
      src1 += 2 * src1_stride;
      dst += 2 * dst_stride;
      h -= 2;
    } while (h != 0);
  } else if (w == 2 && h >= 8) {
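    // Pack two 2-pixel rows into one uint16x4_t. Zipping the two mask bytes
    // with themselves duplicates each value across its pixel pair before
    // widening to 16 bits.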
    do {
      uint16x4_t m0 = vdup_n_u16(0);
      m0 = vld1_lane_u16((uint16_t *)mask, m0, 0);
      uint8x8_t m0_zip =
          vzip_u8(vreinterpret_u8_u16(m0), vreinterpret_u8_u16(m0)).val[0];
      m0 = vget_low_u16(vmovl_u8(m0_zip));
      uint16x4_t s0 = load_unaligned_u16_2x2(src0, src0_stride);
      uint16x4_t s1 = load_unaligned_u16_2x2(src1, src1_stride);

      uint16x4_t blend = alpha_blend_a64_u16x4(m0, s0, s1);

      store_u16x2_strided_x2(dst, dst_stride, blend);

      mask += 2;
      src0 += 2 * src0_stride;
      src1 += 2 * src1_stride;
      dst += 2 * dst_stride;
      h -= 2;
    } while (h != 0);
  } else {
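    // Fall back to the scalar C implementation for the remaining shapes
    // (w == 1, or w == 2 with h < 8).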
    aom_highbd_blend_a64_vmask_c(dst_8, dst_stride, src0_8, src0_stride, src1_8,
                                 src1_stride, mask, w, h, bd);
  }
}