/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/vpx_convolve_msa.h"

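/* 8-tap vertical filter plus destination averaging for 4-wide blocks.
 * Pairs of adjacent 4-byte rows are interleaved into a single vector
 * (e.g. src2110 packs the row 1/0 and row 2/1 byte interleaves), so each
 * dot product yields two output rows at once. The XORI with 128 biases
 * the unsigned pixels into signed range for the signed dot products;
 * PCKEV_XORI128_UB removes the bias again. Four output rows are produced
 * per loop iteration. */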
static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  uint32_t loop_cnt;
  uint32_t tp0, tp1, tp2, tp3;
  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  v16u8 dst0 = { 0 }, out;
  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
  v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
  v16i8 src10998, filt0, filt1, filt2, filt3;
  v8i16 filt, out10, out32;

  src -= (3 * src_stride);

  filt = LD_SH(filter);
  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
  src += (7 * src_stride);

  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
             src54_r, src21_r);
  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
  ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src2110,
             src4332, src6554);
  XORI_B3_128_SB(src2110, src4332, src6554);

  for (loop_cnt = (height >> 2); loop_cnt--;) {
    LD_SB4(src, src_stride, src7, src8, src9, src10);
    src += (4 * src_stride);

    LW4(dst, dst_stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
               src87_r, src98_r, src109_r);
    ILVR_D2_SB(src87_r, src76_r, src109_r, src98_r, src8776, src10998);
    XORI_B2_128_SB(src8776, src10998);
    out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776, filt0,
                                filt1, filt2, filt3);
    out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998, filt0,
                                filt1, filt2, filt3);
    SRARI_H2_SH(out10, out32, FILTER_BITS);
    SAT_SH2_SH(out10, out32, 7);
    out = PCKEV_XORI128_UB(out10, out32);
    out = __msa_aver_u_b(out, dst0);

    ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
    dst += (4 * dst_stride);

    src2110 = src6554;
    src4332 = src8776;
    src6554 = src10998;
    src6 = src10;
  }
}

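/* 8-tap vertical filter plus destination averaging for 8-wide blocks.
 * Each interleaved vector (srcNM_r) holds the byte pairs of two adjacent
 * rows; four output rows are filtered per loop iteration, and the
 * destination rows are loaded as 64-bit words packed two per vector. */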
static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  uint32_t loop_cnt;
  uint64_t tp0, tp1, tp2, tp3;
  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  v16u8 dst0 = { 0 }, dst1 = { 0 };
  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
  v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
  v8i16 filt, out0, out1, out2, out3;

  src -= (3 * src_stride);

  filt = LD_SH(filter);
  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
  src += (7 * src_stride);

  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
             src54_r, src21_r);
  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);

  for (loop_cnt = (height >> 2); loop_cnt--;) {
    LD_SB4(src, src_stride, src7, src8, src9, src10);
    src += (4 * src_stride);

    LD4(dst, dst_stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    XORI_B4_128_SB(src7, src8, src9, src10);
    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
               src87_r, src98_r, src109_r);
    out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, filt1,
                               filt2, filt3);
    out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, filt1,
                               filt2, filt3);
    out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, filt1,
                               filt2, filt3);
    out3 = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
                               filt1, filt2, filt3);
    SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
    SAT_SH4_SH(out0, out1, out2, out3, 7);
    CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst,
                            dst_stride);
    dst += (4 * dst_stride);

    src10_r = src54_r;
    src32_r = src76_r;
    src54_r = src98_r;
    src21_r = src65_r;
    src43_r = src87_r;
    src65_r = src109_r;
    src6 = src10;
  }
}

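/* 8-tap vertical filter plus destination averaging for widths that are
 * multiples of 16. The block is walked in 16-column tiles; within a tile
 * the right (ILVR) and left (ILVL) byte interleaves cover the lower and
 * upper 8 bytes of each 16-byte row, giving the parallel _r and _l
 * dot-product chains. */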
static void common_vt_8t_and_aver_dst_16w_mult_msa(
    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
    int8_t *filter, int32_t height, int32_t width) {
  const uint8_t *src_tmp;
  uint8_t *dst_tmp;
  uint32_t loop_cnt, cnt;
  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
  v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
  v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
  v16i8 filt0, filt1, filt2, filt3;
  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
  v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt;

  src -= (3 * src_stride);

  filt = LD_SH(filter);
  SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

  for (cnt = (width >> 4); cnt--;) {
    src_tmp = src;
    dst_tmp = dst;

    LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
    src_tmp += (7 * src_stride);

    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
               src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
    ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l,
               src54_l, src21_l);
    ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
      LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
      src_tmp += (4 * src_stride);

      LD_UB4(dst_tmp, dst_stride, dst0, dst1, dst2, dst3);
      XORI_B4_128_SB(src7, src8, src9, src10);
      ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
                 src87_r, src98_r, src109_r);
      ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l,
                 src87_l, src98_l, src109_l);
      out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
                                   filt1, filt2, filt3);
      out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
                                   filt1, filt2, filt3);
      out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
                                   filt1, filt2, filt3);
      out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
                                   filt1, filt2, filt3);
      out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0,
                                   filt1, filt2, filt3);
      out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0,
                                   filt1, filt2, filt3);
      out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0,
                                   filt1, filt2, filt3);
      out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0,
                                   filt1, filt2, filt3);
      SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
      SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, FILTER_BITS);
      SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
      SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
      PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
                  out3_r, tmp0, tmp1, tmp2, tmp3);
      XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
      AVER_UB4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst0, dst1,
                  dst2, dst3);
      ST_UB4(dst0, dst1, dst2, dst3, dst_tmp, dst_stride);
      dst_tmp += (4 * dst_stride);

      src10_r = src54_r;
      src32_r = src76_r;
      src54_r = src98_r;
      src21_r = src65_r;
      src43_r = src87_r;
      src65_r = src109_r;
      src10_l = src54_l;
      src32_l = src76_l;
      src54_l = src98_l;
      src21_l = src65_l;
      src43_l = src87_l;
      src65_l = src109_l;
      src6 = src10;
    }

    src += 16;
    dst += 16;
  }
}

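/* Thin wrappers that reuse the multiple-of-16 kernel for the fixed block
 * widths exposed by the convolve API. */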
static void common_vt_8t_and_aver_dst_16w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
                                         filter, height, 16);
}

static void common_vt_8t_and_aver_dst_32w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
                                         filter, height, 32);
}

static void common_vt_8t_and_aver_dst_64w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride,
                                         filter, height, 64);
}

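/* 2-tap (bilinear) vertical filter plus destination averaging for a
 * single 4x4 block: unsigned dot products of vertically adjacent pixel
 * pairs with the splatted tap pair, rounded by FILTER_BITS. */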
static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  uint32_t tp0, tp1, tp2, tp3;
  v16i8 src0, src1, src2, src3, src4;
  v16u8 dst0 = { 0 }, out, filt0, src2110, src4332;
  v16i8 src10_r, src32_r, src21_r, src43_r;
  v8i16 filt;
  v8u16 tmp0, tmp1;

  filt = LD_SH(filter);
  filt0 = (v16u8)__msa_splati_h(filt, 0);

  LD_SB4(src, src_stride, src0, src1, src2, src3);
  src += (4 * src_stride);

  src4 = LD_SB(src);
  src += src_stride;

  LW4(dst, dst_stride, tp0, tp1, tp2, tp3);
  INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
             src32_r, src43_r);
  ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
  DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
  SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);

  out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
  out = __msa_aver_u_b(out, dst0);

  ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}

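/* 2-tap vertical filter plus destination averaging for a 4x8 block: the
 * same scheme as the 4x4 version, unrolled over eight rows. */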
static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  uint32_t tp0, tp1, tp2, tp3;
  v16u8 dst0 = { 0 }, dst1 = { 0 };
  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src87_r;
  v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
  v16u8 src2110, src4332, src6554, src8776, filt0;
  v8u16 tmp0, tmp1, tmp2, tmp3;
  v8i16 filt;

  filt = LD_SH(filter);
  filt0 = (v16u8)__msa_splati_h(filt, 0);

  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
  src += (8 * src_stride);
  src8 = LD_SB(src);

  LW4(dst, dst_stride, tp0, tp1, tp2, tp3);
  INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
  LW4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3);
  INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
             src32_r, src43_r);
  ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
             src76_r, src87_r);
  ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r,
             src76_r, src2110, src4332, src6554, src8776);
  DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
              tmp0, tmp1, tmp2, tmp3);
  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
  PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
  AVER_UB2_UB(src2110, dst0, src4332, dst1, src2110, src4332);
  ST4x8_UB(src2110, src4332, dst, dst_stride);
}

static void common_vt_2t_and_aver_dst_4w_msa(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (4 == height) {
    common_vt_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
  } else if (8 == height) {
    common_vt_2t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, filter);
  }
}

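/* 2-tap vertical filter plus destination averaging for a single 8x4
 * block; all four output rows come from one round of dot products. */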
static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter) {
  int64_t tp0, tp1, tp2, tp3;
  v16u8 src0, src1, src2, src3, src4;
  v16u8 dst0 = { 0 }, dst1 = { 0 }, vec0, vec1, vec2, vec3, filt0;
  v8u16 tmp0, tmp1, tmp2, tmp3;
  v8i16 filt;

  /* rearranging filter_y */
  filt = LD_SH(filter);
  filt0 = (v16u8)__msa_splati_h(filt, 0);

  LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
  LD4(dst, dst_stride, tp0, tp1, tp2, tp3);
  INSERT_D2_UB(tp0, tp1, dst0);
  INSERT_D2_UB(tp2, tp3, dst1);
  ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
  ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3);
  DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
              tmp2, tmp3);
  SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
  PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst, dst_stride);
}

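/* 2-tap vertical filter plus destination averaging for 8-wide blocks
 * whose height is a multiple of 8; src8 is carried over as the first
 * source row of the next iteration. */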
static void common_vt_2t_and_aver_dst_8x8mult_msa(
    const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride,
    int8_t *filter, int32_t height) {
  uint32_t loop_cnt;
  int64_t tp0, tp1, tp2, tp3;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
  v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };
  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
  v8u16 tmp0, tmp1, tmp2, tmp3;
  v8i16 filt;

  /* rearranging filter_y */
  filt = LD_SH(filter);
  filt0 = (v16u8)__msa_splati_h(filt, 0);

  src0 = LD_UB(src);
  src += src_stride;

  for (loop_cnt = (height >> 3); loop_cnt--;) {
    LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
    src += (8 * src_stride);
    LD4(dst, dst_stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    LD4(dst + 4 * dst_stride, dst_stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst2);
    INSERT_D2_UB(tp2, tp3, dst3);

    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
               vec3);
    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6,
               vec7);
    DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
                tmp2, tmp3);
    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst, dst_stride);
    dst += (4 * dst_stride);

    DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
                tmp2, tmp3);
    SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst2, dst3, dst, dst_stride);
    dst += (4 * dst_stride);

    src0 = src8;
  }
}

static void common_vt_2t_and_aver_dst_8w_msa(const uint8_t *src,
                                             int32_t src_stride, uint8_t *dst,
                                             int32_t dst_stride, int8_t *filter,
                                             int32_t height) {
  if (4 == height) {
    common_vt_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter);
  } else {
    common_vt_2t_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride,
                                          filter, height);
  }
}

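/* 2-tap vertical filter plus destination averaging for 16-wide blocks,
 * four rows per iteration. ILVR/ILVL split each 16-byte row into its
 * lower and upper halves for the 8-lane dot products. */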
static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt;
  v16u8 src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, filt0;
  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v8u16 tmp0, tmp1, tmp2, tmp3, filt;

  /* rearranging filter_y */
  filt = LD_UH(filter);
  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);

  src0 = LD_UB(src);
  src += src_stride;

  for (loop_cnt = (height >> 2); loop_cnt--;) {
    LD_UB4(src, src_stride, src1, src2, src3, src4);
    src += (4 * src_stride);

    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);
    dst += dst_stride;

    ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
    ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst);
    dst += dst_stride;

    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst);
    dst += dst_stride;

    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst);
    dst += dst_stride;

    src0 = src4;
  }
}

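/* 2-tap vertical filter plus destination averaging for 32-wide blocks:
 * the 16-wide scheme applied to the left and right 16-column halves,
 * four rows per iteration. */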
static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
  v8u16 tmp0, tmp1, tmp2, tmp3, filt;

  /* rearranging filter_y */
  filt = LD_UH(filter);
  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);

  LD_UB2(src, 16, src0, src5);
  src += src_stride;

  for (loop_cnt = (height >> 2); loop_cnt--;) {
    LD_UB4(src, src_stride, src1, src2, src3, src4);
    LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);

    LD_UB4(src + 16, src_stride, src6, src7, src8, src9);
    LD_UB4(dst + 16, dst_stride, dst4, dst5, dst6, dst7);
    src += (4 * src_stride);

    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);

    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride);

    ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
    ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst + 2 * dst_stride);

    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst + 3 * dst_stride);

    ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2);
    ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3);
    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 16);

    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 16 + dst_stride);

    ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6);
    ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);
    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst6, dst + 16 + 2 * dst_stride);

    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst7, dst + 16 + 3 * dst_stride);
    dst += (4 * dst_stride);

    src0 = src4;
    src5 = src9;
  }
}

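/* 2-tap vertical filter plus destination averaging for 64-wide blocks:
 * four 16-column strips, processed two rows at a time. */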
static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src,
                                              int32_t src_stride, uint8_t *dst,
                                              int32_t dst_stride,
                                              int8_t *filter, int32_t height) {
  uint32_t loop_cnt;
  v16u8 src0, src1, src2, src3, src4, src5;
  v16u8 src6, src7, src8, src9, src10, src11, filt0;
  v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  v8u16 filt;

  /* rearranging filter_y */
  filt = LD_UH(filter);
  filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);

  LD_UB4(src, 16, src0, src3, src6, src9);
  src += src_stride;

  for (loop_cnt = (height >> 1); loop_cnt--;) {
    LD_UB2(src, src_stride, src1, src2);
    LD_UB2(dst, dst_stride, dst0, dst1);
    LD_UB2(src + 16, src_stride, src4, src5);
    LD_UB2(dst + 16, dst_stride, dst2, dst3);
    LD_UB2(src + 32, src_stride, src7, src8);
    LD_UB2(dst + 32, dst_stride, dst4, dst5);
    LD_UB2(src + 48, src_stride, src10, src11);
    LD_UB2(dst + 48, dst_stride, dst6, dst7);
    src += (2 * src_stride);

    ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
    ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);

    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride);

    ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6);
    ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7);
    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
    SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp5, tmp4, dst2, dst + 16);

    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
    SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp7, tmp6, dst3, dst + 16 + dst_stride);

    ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2);
    ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3);
    DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
    SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 32);

    DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
    SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 32 + dst_stride);

    ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6);
    ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7);
    DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
    SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp5, tmp4, dst6, (dst + 48));

    DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
    SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
    PCKEV_AVG_ST_UB(tmp7, tmp6, dst7, dst + 48 + dst_stride);
    dst += (2 * dst_stride);

    src0 = src2;
    src3 = src5;
    src6 = src8;
    src9 = src11;
  }
}

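/* Entry point for the averaging vertical convolution. Copies the selected
 * y filter to int8, requires an unscaled filter step (y_step_q4 == 16),
 * then dispatches on tap count and block width; unsupported widths fall
 * back to the C implementation. */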
void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const InterpKernel *filter, int x0_q4,
                                int x_step_q4, int y0_q4, int y_step_q4, int w,
                                int h) {
  const int16_t *const filter_y = filter[y0_q4];
  int8_t cnt, filt_ver[8];

  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  for (cnt = 0; cnt < 8; ++cnt) {
    filt_ver[cnt] = filter_y[cnt];
  }

  if (vpx_get_filter_taps(filter_y) == 2) {
    switch (w) {
      case 4:
        common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 8:
        common_vt_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 16:
        common_vt_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 32:
        common_vt_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_ver[3], h);
        break;
      case 64:
        common_vt_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, &filt_ver[3], h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  } else {
    switch (w) {
      case 4:
        common_vt_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, filt_ver, h);
        break;
      case 8:
        common_vt_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst,
                                         (int32_t)dst_stride, filt_ver, h);
        break;
      case 16:
        common_vt_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_ver, h);
        break;
      case 32:
        common_vt_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_ver, h);
        break;
      case 64:
        common_vt_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst,
                                          (int32_t)dst_stride, filt_ver, h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}