/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_
#define VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/vpx_filter.h"
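
// Apply an 8-tap filter to four 16-bit source pixels. The products of the two
// center taps (the largest values in these kernels) are added last with
// saturating adds so that intermediate int16 overflow saturates rather than
// wrapping.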
static INLINE int16x4_t convolve8_4(const int16x4_t s0, const int16x4_t s1,
                                    const int16x4_t s2, const int16x4_t s3,
                                    const int16x4_t s4, const int16x4_t s5,
                                    const int16x4_t s6, const int16x4_t s7,
                                    const int16x8_t filters) {
  const int16x4_t filters_lo = vget_low_s16(filters);
  const int16x4_t filters_hi = vget_high_s16(filters);
  int16x4_t sum;

  sum = vmul_lane_s16(s0, filters_lo, 0);
  sum = vmla_lane_s16(sum, s1, filters_lo, 1);
  sum = vmla_lane_s16(sum, s2, filters_lo, 2);
  sum = vmla_lane_s16(sum, s5, filters_hi, 1);
  sum = vmla_lane_s16(sum, s6, filters_hi, 2);
  sum = vmla_lane_s16(sum, s7, filters_hi, 3);
  sum = vqadd_s16(sum, vmul_lane_s16(s3, filters_lo, 3));
  sum = vqadd_s16(sum, vmul_lane_s16(s4, filters_hi, 0));
  return sum;
}

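// As convolve8_4, but for eight pixels at a time; the sum is narrowed to
// unsigned 8 bits with a rounding saturating shift by FILTER_BITS.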
static INLINE uint8x8_t convolve8_8(const int16x8_t s0, const int16x8_t s1,
                                    const int16x8_t s2, const int16x8_t s3,
                                    const int16x8_t s4, const int16x8_t s5,
                                    const int16x8_t s6, const int16x8_t s7,
                                    const int16x8_t filters) {
  const int16x4_t filters_lo = vget_low_s16(filters);
  const int16x4_t filters_hi = vget_high_s16(filters);
  int16x8_t sum;

  sum = vmulq_lane_s16(s0, filters_lo, 0);
  sum = vmlaq_lane_s16(sum, s1, filters_lo, 1);
  sum = vmlaq_lane_s16(sum, s2, filters_lo, 2);
  sum = vmlaq_lane_s16(sum, s5, filters_hi, 1);
  sum = vmlaq_lane_s16(sum, s6, filters_hi, 2);
  sum = vmlaq_lane_s16(sum, s7, filters_hi, 3);
  sum = vqaddq_s16(sum, vmulq_lane_s16(s3, filters_lo, 3));
  sum = vqaddq_s16(sum, vmulq_lane_s16(s4, filters_hi, 0));
  return vqrshrun_n_s16(sum, FILTER_BITS);
}

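// Widen eight rows of 8-bit pixels to 16 bits and apply the 8-tap filter.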
static INLINE uint8x8_t scale_filter_8(const uint8x8_t *const s,
                                       const int16x8_t filters) {
  int16x8_t ss[8];

  ss[0] = vreinterpretq_s16_u16(vmovl_u8(s[0]));
  ss[1] = vreinterpretq_s16_u16(vmovl_u8(s[1]));
  ss[2] = vreinterpretq_s16_u16(vmovl_u8(s[2]));
  ss[3] = vreinterpretq_s16_u16(vmovl_u8(s[3]));
  ss[4] = vreinterpretq_s16_u16(vmovl_u8(s[4]));
  ss[5] = vreinterpretq_s16_u16(vmovl_u8(s[5]));
  ss[6] = vreinterpretq_s16_u16(vmovl_u8(s[6]));
  ss[7] = vreinterpretq_s16_u16(vmovl_u8(s[7]));

  return convolve8_8(ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7],
                     filters);
}

// 2-tap (bilinear) filter values are always positive, but 4-tap filter values
// are negative on the outer edges (taps 0 and 3), with taps 1 and 2 having
// much greater positive values to compensate. To use instructions that operate
// on 8-bit types we also need the types to be unsigned. Subtracting the
// products of taps 0 and 3 from the products of taps 1 and 2 always works
// given that 2-tap filters are 0-padded.
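// As a concrete sketch (tap values chosen purely for illustration): halved
// taps { 2, 34, 34, 2 } give a worst-case positive partial sum of
// 2 * 34 * 255 = 17340 and a worst-case negative sum of -2 * 2 * 255 = -1020,
// both of which remain representable when the uint16 accumulator is
// reinterpreted as int16 below.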
static INLINE uint8x8_t convolve4_8(const uint8x8_t s0, const uint8x8_t s1,
                                    const uint8x8_t s2, const uint8x8_t s3,
                                    const uint8x8_t filter_taps[4]) {
  uint16x8_t sum = vmull_u8(s1, filter_taps[1]);
  sum = vmlal_u8(sum, s2, filter_taps[2]);
  sum = vmlsl_u8(sum, s0, filter_taps[0]);
  sum = vmlsl_u8(sum, s3, filter_taps[3]);
  // The filter values were halved, so the final right shift is one bit
  // smaller.
  return vqrshrun_n_s16(vreinterpretq_s16_u16(sum), FILTER_BITS - 1);
}

static INLINE void convolve_4tap_vert_neon(const uint8_t *src,
                                           ptrdiff_t src_stride, uint8_t *dst,
                                           ptrdiff_t dst_stride, int w, int h,
                                           const int16x8_t filter) {
  // 4-tap and bilinear filter values are even, so halve them to reduce
  // intermediate precision requirements.
  const uint8x8_t y_filter =
      vshrn_n_u16(vreinterpretq_u16_s16(vabsq_s16(filter)), 1);

  // Neon does not have lane-referencing multiply or multiply-accumulate
  // instructions that operate on vectors of 8-bit elements. This means we have
  // to duplicate filter taps into a whole vector and use standard multiply /
  // multiply-accumulate instructions.
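  // The four live taps sit in lanes 2-5 of the zero-padded 8-tap kernel
  // (following the 0-padding described above), hence the lane indices below.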
  const uint8x8_t filter_taps[4] = { vdup_lane_u8(y_filter, 2),
                                     vdup_lane_u8(y_filter, 3),
                                     vdup_lane_u8(y_filter, 4),
                                     vdup_lane_u8(y_filter, 5) };

  if (w == 4) {
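    // Each 8-byte load packs two consecutive 4-pixel rows, so each iteration
    // filters and stores four rows.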
    uint8x8_t s01 = load_unaligned_u8(src + 0 * src_stride, src_stride);
    uint8x8_t s12 = load_unaligned_u8(src + 1 * src_stride, src_stride);

    src += 2 * src_stride;

    do {
      uint8x8_t s23 = load_unaligned_u8(src + 0 * src_stride, src_stride);
      uint8x8_t s34 = load_unaligned_u8(src + 1 * src_stride, src_stride);
      uint8x8_t s45 = load_unaligned_u8(src + 2 * src_stride, src_stride);
      uint8x8_t s56 = load_unaligned_u8(src + 3 * src_stride, src_stride);

      uint8x8_t d01 = convolve4_8(s01, s12, s23, s34, filter_taps);
      uint8x8_t d23 = convolve4_8(s23, s34, s45, s56, filter_taps);

      store_unaligned_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_unaligned_u8(dst + 2 * dst_stride, dst_stride, d23);

      s01 = s45;
      s12 = s56;
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
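    // Process wider blocks in 8-pixel-wide columns, four rows per iteration.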
    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int height = h;

      uint8x8_t s0, s1, s2;
      load_u8_8x3(s, src_stride, &s0, &s1, &s2);

      s += 3 * src_stride;

      do {
        uint8x8_t s3, s4, s5, s6;
        load_u8_8x4(s, src_stride, &s3, &s4, &s5, &s6);

        uint8x8_t d0 = convolve4_8(s0, s1, s2, s3, filter_taps);
        uint8x8_t d1 = convolve4_8(s1, s2, s3, s4, filter_taps);
        uint8x8_t d2 = convolve4_8(s2, s3, s4, s5, filter_taps);
        uint8x8_t d3 = convolve4_8(s3, s4, s5, s6, filter_taps);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s0 = s4;
        s1 = s5;
        s2 = s6;
        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src += 8;
      dst += 8;
      w -= 8;
    } while (w != 0);
  }
}

#endif  // VPX_VPX_DSP_ARM_VPX_CONVOLVE8_NEON_H_