/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/highbd_idct_neon.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"

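// Load eight rows of eight consecutive 32-bit coefficients from a buffer with
// a row stride of 32 coefficients.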
static INLINE void load_8x8_s32_dual(
    const tran_low_t *input, int32x4x2_t *const in0, int32x4x2_t *const in1,
    int32x4x2_t *const in2, int32x4x2_t *const in3, int32x4x2_t *const in4,
    int32x4x2_t *const in5, int32x4x2_t *const in6, int32x4x2_t *const in7) {
  in0->val[0] = vld1q_s32(input);
  in0->val[1] = vld1q_s32(input + 4);
  input += 32;
  in1->val[0] = vld1q_s32(input);
  in1->val[1] = vld1q_s32(input + 4);
  input += 32;
  in2->val[0] = vld1q_s32(input);
  in2->val[1] = vld1q_s32(input + 4);
  input += 32;
  in3->val[0] = vld1q_s32(input);
  in3->val[1] = vld1q_s32(input + 4);
  input += 32;
  in4->val[0] = vld1q_s32(input);
  in4->val[1] = vld1q_s32(input + 4);
  input += 32;
  in5->val[0] = vld1q_s32(input);
  in5->val[1] = vld1q_s32(input + 4);
  input += 32;
  in6->val[0] = vld1q_s32(input);
  in6->val[1] = vld1q_s32(input + 4);
  input += 32;
  in7->val[0] = vld1q_s32(input);
  in7->val[1] = vld1q_s32(input + 4);
}

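// Load eight rows of four consecutive 32-bit coefficients from a buffer with
// a row stride of 32 coefficients.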
static INLINE void load_4x8_s32_dual(const tran_low_t *input,
                                     int32x4_t *const in0, int32x4_t *const in1,
                                     int32x4_t *const in2, int32x4_t *const in3,
                                     int32x4_t *const in4, int32x4_t *const in5,
                                     int32x4_t *const in6,
                                     int32x4_t *const in7) {
  *in0 = vld1q_s32(input);
  input += 32;
  *in1 = vld1q_s32(input);
  input += 32;
  *in2 = vld1q_s32(input);
  input += 32;
  *in3 = vld1q_s32(input);
  input += 32;
  *in4 = vld1q_s32(input);
  input += 32;
  *in5 = vld1q_s32(input);
  input += 32;
  *in6 = vld1q_s32(input);
  input += 32;
  *in7 = vld1q_s32(input);
}

// Only for the first pass of the _135_ variant. Since it only uses values from
// the top left 16x16 it can safely assume all the remaining values are 0 and
// skip an awful lot of calculations. In fact, only the first 12 columns make
// the cut. None of the elements in the 13th, 14th, 15th or 16th columns are
// used so it skips any calls to input[12|13|14|15] too.
// In C this does a single row of 32 for each call. Here it transposes the top
// left 12x8 to allow using SIMD.

// vp9/common/vp9_scan.c:vp9_default_iscan_32x32 arranges the first 135 non-zero
// coefficients as follows:
//      0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
//  0   0   2   5  10  17  25  38  47  62  83 101 121
//  1   1   4   8  15  22  30  45  58  74  92 112 133
//  2   3   7  12  18  28  36  52  64  82 102 118
//  3   6  11  16  23  31  43  60  73  90 109 126
//  4   9  14  19  29  37  50  65  78  98 116 134
//  5  13  20  26  35  44  54  72  85 105 123
//  6  21  27  33  42  53  63  80  94 113 132
//  7  24  32  39  48  57  71  88 104 120
//  8  34  40  46  56  68  81  96 111 130
//  9  41  49  55  67  77  91 107 124
// 10  51  59  66  76  89  99 119 131
// 11  61  69  75  87 100 114 129
// 12  70  79  86  97 108 122
// 13  84  93 103 110 125
// 14  98 106 115 127
// 15 117 128
static void vpx_highbd_idct32_12_neon(const tran_low_t *const input,
                                      int32_t *output) {
  int32x4x2_t in[12], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32],
      s8[32];

  load_8x8_s32_dual(input, &in[0], &in[1], &in[2], &in[3], &in[4], &in[5],
                    &in[6], &in[7]);
  transpose_s32_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
                    &in[7]);

  load_4x8_s32_dual(input + 8, &in[8].val[0], &in[8].val[1], &in[9].val[0],
                    &in[9].val[1], &in[10].val[0], &in[10].val[1],
                    &in[11].val[0], &in[11].val[1]);
  transpose_s32_4x8(&in[8].val[0], &in[8].val[1], &in[9].val[0], &in[9].val[1],
                    &in[10].val[0], &in[10].val[1], &in[11].val[0],
                    &in[11].val[1]);
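
  // in[0] through in[11] now hold the transposed top-left 12x8 block: one
  // vector pair per input column.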

  // stage 1
  s1[16] = multiply_shift_and_narrow_s32_dual(in[1], cospi_31_64);
  s1[31] = multiply_shift_and_narrow_s32_dual(in[1], cospi_1_64);

  s1[18] = multiply_shift_and_narrow_s32_dual(in[9], cospi_23_64);
  s1[29] = multiply_shift_and_narrow_s32_dual(in[9], cospi_9_64);

  s1[19] = multiply_shift_and_narrow_s32_dual(in[7], -cospi_25_64);
  s1[28] = multiply_shift_and_narrow_s32_dual(in[7], cospi_7_64);

  s1[20] = multiply_shift_and_narrow_s32_dual(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s32_dual(in[5], cospi_5_64);

  s1[21] = multiply_shift_and_narrow_s32_dual(in[11], -cospi_21_64);
  s1[26] = multiply_shift_and_narrow_s32_dual(in[11], cospi_11_64);

  s1[23] = multiply_shift_and_narrow_s32_dual(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s32_dual(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s32_dual(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s32_dual(in[2], cospi_2_64);

  s2[10] = multiply_shift_and_narrow_s32_dual(in[10], cospi_22_64);
  s2[13] = multiply_shift_and_narrow_s32_dual(in[10], cospi_10_64);

  s2[11] = multiply_shift_and_narrow_s32_dual(in[6], -cospi_26_64);
  s2[12] = multiply_shift_and_narrow_s32_dual(in[6], cospi_6_64);

  s2[18] = highbd_idct_sub_dual(s1[19], s1[18]);
  s2[19] = highbd_idct_add_dual(s1[18], s1[19]);
  s2[20] = highbd_idct_add_dual(s1[20], s1[21]);
  s2[21] = highbd_idct_sub_dual(s1[20], s1[21]);
  s2[26] = highbd_idct_sub_dual(s1[27], s1[26]);
  s2[27] = highbd_idct_add_dual(s1[26], s1[27]);
  s2[28] = highbd_idct_add_dual(s1[28], s1[29]);
  s2[29] = highbd_idct_sub_dual(s1[28], s1[29]);

  // stage 3
  s3[4] = multiply_shift_and_narrow_s32_dual(in[4], cospi_28_64);
  s3[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);

  s3[10] = highbd_idct_sub_dual(s2[11], s2[10]);
  s3[11] = highbd_idct_add_dual(s2[10], s2[11]);
  s3[12] = highbd_idct_add_dual(s2[12], s2[13]);
  s3[13] = highbd_idct_sub_dual(s2[12], s2[13]);

  s3[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,
                                                         s1[31], cospi_28_64);
  s3[30] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], cospi_28_64,
                                                         s1[31], cospi_4_64);

  s3[18] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_28_64,
                                                         s2[29], -cospi_4_64);
  s3[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_4_64,
                                                         s2[29], cospi_28_64);

  s3[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_20_64,
                                                         s2[26], cospi_12_64);
  s3[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], cospi_12_64,
                                                         s2[26], cospi_20_64);

  s3[22] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_12_64,
                                                         s1[24], -cospi_20_64);
  s3[25] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_20_64,
                                                         s1[24], cospi_12_64);

  // stage 4
  s4[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);
  s4[2] = multiply_shift_and_narrow_s32_dual(in[8], cospi_24_64);
  s4[3] = multiply_shift_and_narrow_s32_dual(in[8], cospi_8_64);

  s4[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,
                                                        s2[15], cospi_24_64);
  s4[14] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], cospi_24_64,
                                                         s2[15], cospi_8_64);

  s4[10] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_24_64,
                                                         s3[13], -cospi_8_64);
  s4[13] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_8_64,
                                                         s3[13], cospi_24_64);

  s4[16] = highbd_idct_add_dual(s1[16], s2[19]);
  s4[17] = highbd_idct_add_dual(s3[17], s3[18]);
  s4[18] = highbd_idct_sub_dual(s3[17], s3[18]);
  s4[19] = highbd_idct_sub_dual(s1[16], s2[19]);
  s4[20] = highbd_idct_sub_dual(s1[23], s2[20]);
  s4[21] = highbd_idct_sub_dual(s3[22], s3[21]);
  s4[22] = highbd_idct_add_dual(s3[21], s3[22]);
  s4[23] = highbd_idct_add_dual(s2[20], s1[23]);
  s4[24] = highbd_idct_add_dual(s1[24], s2[27]);
  s4[25] = highbd_idct_add_dual(s3[25], s3[26]);
  s4[26] = highbd_idct_sub_dual(s3[25], s3[26]);
  s4[27] = highbd_idct_sub_dual(s1[24], s2[27]);
  s4[28] = highbd_idct_sub_dual(s1[31], s2[28]);
  s4[29] = highbd_idct_sub_dual(s3[30], s3[29]);
  s4[30] = highbd_idct_add_dual(s3[29], s3[30]);
  s4[31] = highbd_idct_add_dual(s2[28], s1[31]);

  // stage 5
  s5[0] = highbd_idct_add_dual(s4[0], s4[3]);
  s5[1] = highbd_idct_add_dual(s4[0], s4[2]);
  s5[2] = highbd_idct_sub_dual(s4[0], s4[2]);
  s5[3] = highbd_idct_sub_dual(s4[0], s4[3]);

  s5[5] = sub_multiply_shift_and_narrow_s32_dual(s3[7], s3[4], cospi_16_64);
  s5[6] = add_multiply_shift_and_narrow_s32_dual(s3[4], s3[7], cospi_16_64);

  s5[8] = highbd_idct_add_dual(s2[8], s3[11]);
  s5[9] = highbd_idct_add_dual(s4[9], s4[10]);
  s5[10] = highbd_idct_sub_dual(s4[9], s4[10]);
  s5[11] = highbd_idct_sub_dual(s2[8], s3[11]);
  s5[12] = highbd_idct_sub_dual(s2[15], s3[12]);
  s5[13] = highbd_idct_sub_dual(s4[14], s4[13]);
  s5[14] = highbd_idct_add_dual(s4[13], s4[14]);
  s5[15] = highbd_idct_add_dual(s2[15], s3[12]);

  s5[18] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], -cospi_8_64,
                                                         s4[29], cospi_24_64);
  s5[29] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], cospi_24_64,
                                                         s4[29], cospi_8_64);

  s5[19] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], -cospi_8_64,
                                                         s4[28], cospi_24_64);
  s5[28] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], cospi_24_64,
                                                         s4[28], cospi_8_64);

  s5[20] = multiply_accumulate_shift_and_narrow_s32_dual(s4[20], -cospi_24_64,
                                                         s4[27], -cospi_8_64);
  s5[27] = multiply_accumulate_shift_and_narrow_s32_dual(s4[20], -cospi_8_64,
                                                         s4[27], cospi_24_64);

  s5[21] = multiply_accumulate_shift_and_narrow_s32_dual(s4[21], -cospi_24_64,
                                                         s4[26], -cospi_8_64);
  s5[26] = multiply_accumulate_shift_and_narrow_s32_dual(s4[21], -cospi_8_64,
                                                         s4[26], cospi_24_64);

  // stage 6
  s6[0] = highbd_idct_add_dual(s5[0], s3[7]);
  s6[1] = highbd_idct_add_dual(s5[1], s5[6]);
  s6[2] = highbd_idct_add_dual(s5[2], s5[5]);
  s6[3] = highbd_idct_add_dual(s5[3], s3[4]);
  s6[4] = highbd_idct_sub_dual(s5[3], s3[4]);
  s6[5] = highbd_idct_sub_dual(s5[2], s5[5]);
  s6[6] = highbd_idct_sub_dual(s5[1], s5[6]);
  s6[7] = highbd_idct_sub_dual(s5[0], s3[7]);

  s6[10] = sub_multiply_shift_and_narrow_s32_dual(s5[13], s5[10], cospi_16_64);
  s6[13] = add_multiply_shift_and_narrow_s32_dual(s5[10], s5[13], cospi_16_64);

  s6[11] = sub_multiply_shift_and_narrow_s32_dual(s5[12], s5[11], cospi_16_64);
  s6[12] = add_multiply_shift_and_narrow_s32_dual(s5[11], s5[12], cospi_16_64);

  s6[16] = highbd_idct_add_dual(s4[16], s4[23]);
  s6[17] = highbd_idct_add_dual(s4[17], s4[22]);
  s6[18] = highbd_idct_add_dual(s5[18], s5[21]);
  s6[19] = highbd_idct_add_dual(s5[19], s5[20]);
  s6[20] = highbd_idct_sub_dual(s5[19], s5[20]);
  s6[21] = highbd_idct_sub_dual(s5[18], s5[21]);
  s6[22] = highbd_idct_sub_dual(s4[17], s4[22]);
  s6[23] = highbd_idct_sub_dual(s4[16], s4[23]);

  s6[24] = highbd_idct_sub_dual(s4[31], s4[24]);
  s6[25] = highbd_idct_sub_dual(s4[30], s4[25]);
  s6[26] = highbd_idct_sub_dual(s5[29], s5[26]);
  s6[27] = highbd_idct_sub_dual(s5[28], s5[27]);
  s6[28] = highbd_idct_add_dual(s5[27], s5[28]);
  s6[29] = highbd_idct_add_dual(s5[26], s5[29]);
  s6[30] = highbd_idct_add_dual(s4[25], s4[30]);
  s6[31] = highbd_idct_add_dual(s4[24], s4[31]);

  // stage 7
  s7[0] = highbd_idct_add_dual(s6[0], s5[15]);
  s7[1] = highbd_idct_add_dual(s6[1], s5[14]);
  s7[2] = highbd_idct_add_dual(s6[2], s6[13]);
  s7[3] = highbd_idct_add_dual(s6[3], s6[12]);
  s7[4] = highbd_idct_add_dual(s6[4], s6[11]);
  s7[5] = highbd_idct_add_dual(s6[5], s6[10]);
  s7[6] = highbd_idct_add_dual(s6[6], s5[9]);
  s7[7] = highbd_idct_add_dual(s6[7], s5[8]);
  s7[8] = highbd_idct_sub_dual(s6[7], s5[8]);
  s7[9] = highbd_idct_sub_dual(s6[6], s5[9]);
  s7[10] = highbd_idct_sub_dual(s6[5], s6[10]);
  s7[11] = highbd_idct_sub_dual(s6[4], s6[11]);
  s7[12] = highbd_idct_sub_dual(s6[3], s6[12]);
  s7[13] = highbd_idct_sub_dual(s6[2], s6[13]);
  s7[14] = highbd_idct_sub_dual(s6[1], s5[14]);
  s7[15] = highbd_idct_sub_dual(s6[0], s5[15]);

  s7[20] = sub_multiply_shift_and_narrow_s32_dual(s6[27], s6[20], cospi_16_64);
  s7[27] = add_multiply_shift_and_narrow_s32_dual(s6[20], s6[27], cospi_16_64);

  s7[21] = sub_multiply_shift_and_narrow_s32_dual(s6[26], s6[21], cospi_16_64);
  s7[26] = add_multiply_shift_and_narrow_s32_dual(s6[21], s6[26], cospi_16_64);

  s7[22] = sub_multiply_shift_and_narrow_s32_dual(s6[25], s6[22], cospi_16_64);
  s7[25] = add_multiply_shift_and_narrow_s32_dual(s6[22], s6[25], cospi_16_64);

  s7[23] = sub_multiply_shift_and_narrow_s32_dual(s6[24], s6[23], cospi_16_64);
  s7[24] = add_multiply_shift_and_narrow_s32_dual(s6[23], s6[24], cospi_16_64);

  // final stage
  s8[0] = highbd_idct_add_dual(s7[0], s6[31]);
  s8[1] = highbd_idct_add_dual(s7[1], s6[30]);
  s8[2] = highbd_idct_add_dual(s7[2], s6[29]);
  s8[3] = highbd_idct_add_dual(s7[3], s6[28]);
  s8[4] = highbd_idct_add_dual(s7[4], s7[27]);
  s8[5] = highbd_idct_add_dual(s7[5], s7[26]);
  s8[6] = highbd_idct_add_dual(s7[6], s7[25]);
  s8[7] = highbd_idct_add_dual(s7[7], s7[24]);
  s8[8] = highbd_idct_add_dual(s7[8], s7[23]);
  s8[9] = highbd_idct_add_dual(s7[9], s7[22]);
  s8[10] = highbd_idct_add_dual(s7[10], s7[21]);
  s8[11] = highbd_idct_add_dual(s7[11], s7[20]);
  s8[12] = highbd_idct_add_dual(s7[12], s6[19]);
  s8[13] = highbd_idct_add_dual(s7[13], s6[18]);
  s8[14] = highbd_idct_add_dual(s7[14], s6[17]);
  s8[15] = highbd_idct_add_dual(s7[15], s6[16]);
  s8[16] = highbd_idct_sub_dual(s7[15], s6[16]);
  s8[17] = highbd_idct_sub_dual(s7[14], s6[17]);
  s8[18] = highbd_idct_sub_dual(s7[13], s6[18]);
  s8[19] = highbd_idct_sub_dual(s7[12], s6[19]);
  s8[20] = highbd_idct_sub_dual(s7[11], s7[20]);
  s8[21] = highbd_idct_sub_dual(s7[10], s7[21]);
  s8[22] = highbd_idct_sub_dual(s7[9], s7[22]);
  s8[23] = highbd_idct_sub_dual(s7[8], s7[23]);
  s8[24] = highbd_idct_sub_dual(s7[7], s7[24]);
  s8[25] = highbd_idct_sub_dual(s7[6], s7[25]);
  s8[26] = highbd_idct_sub_dual(s7[5], s7[26]);
  s8[27] = highbd_idct_sub_dual(s7[4], s7[27]);
  s8[28] = highbd_idct_sub_dual(s7[3], s6[28]);
  s8[29] = highbd_idct_sub_dual(s7[2], s6[29]);
  s8[30] = highbd_idct_sub_dual(s7[1], s6[30]);
  s8[31] = highbd_idct_sub_dual(s7[0], s6[31]);

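  // Write the 32 results of this pass to the 32x16 intermediate buffer. The
  // row stride is 16, so the two first-pass calls fill columns 0-7 and 8-15
  // respectively.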
  vst1q_s32(output + 0, s8[0].val[0]);
  vst1q_s32(output + 4, s8[0].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[1].val[0]);
  vst1q_s32(output + 4, s8[1].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[2].val[0]);
  vst1q_s32(output + 4, s8[2].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[3].val[0]);
  vst1q_s32(output + 4, s8[3].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[4].val[0]);
  vst1q_s32(output + 4, s8[4].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[5].val[0]);
  vst1q_s32(output + 4, s8[5].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[6].val[0]);
  vst1q_s32(output + 4, s8[6].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[7].val[0]);
  vst1q_s32(output + 4, s8[7].val[1]);
  output += 16;

  vst1q_s32(output + 0, s8[8].val[0]);
  vst1q_s32(output + 4, s8[8].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[9].val[0]);
  vst1q_s32(output + 4, s8[9].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[10].val[0]);
  vst1q_s32(output + 4, s8[10].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[11].val[0]);
  vst1q_s32(output + 4, s8[11].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[12].val[0]);
  vst1q_s32(output + 4, s8[12].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[13].val[0]);
  vst1q_s32(output + 4, s8[13].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[14].val[0]);
  vst1q_s32(output + 4, s8[14].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[15].val[0]);
  vst1q_s32(output + 4, s8[15].val[1]);
  output += 16;

  vst1q_s32(output + 0, s8[16].val[0]);
  vst1q_s32(output + 4, s8[16].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[17].val[0]);
  vst1q_s32(output + 4, s8[17].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[18].val[0]);
  vst1q_s32(output + 4, s8[18].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[19].val[0]);
  vst1q_s32(output + 4, s8[19].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[20].val[0]);
  vst1q_s32(output + 4, s8[20].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[21].val[0]);
  vst1q_s32(output + 4, s8[21].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[22].val[0]);
  vst1q_s32(output + 4, s8[22].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[23].val[0]);
  vst1q_s32(output + 4, s8[23].val[1]);
  output += 16;

  vst1q_s32(output + 0, s8[24].val[0]);
  vst1q_s32(output + 4, s8[24].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[25].val[0]);
  vst1q_s32(output + 4, s8[25].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[26].val[0]);
  vst1q_s32(output + 4, s8[26].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[27].val[0]);
  vst1q_s32(output + 4, s8[27].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[28].val[0]);
  vst1q_s32(output + 4, s8[28].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[29].val[0]);
  vst1q_s32(output + 4, s8[29].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[30].val[0]);
  vst1q_s32(output + 4, s8[30].val[1]);
  output += 16;
  vst1q_s32(output + 0, s8[31].val[0]);
  vst1q_s32(output + 4, s8[31].val[1]);
}

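// Second pass. Loads and transposes an 8x16 block of the 32x16 intermediate
// buffer, applies the 32-point IDCT with the 16 non-zero inputs produced by
// the first pass, and adds the clamped result to an 8-pixel-wide strip of the
// destination.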
static void vpx_highbd_idct32_16_neon(const int32_t *const input,
                                      uint16_t *const output, const int stride,
                                      const int bd) {
  int32x4x2_t in[16], s1[32], s2[32], s3[32], s4[32], s5[32], s6[32], s7[32],
      out[32];

  load_and_transpose_s32_8x8(input, 16, &in[0], &in[1], &in[2], &in[3], &in[4],
                             &in[5], &in[6], &in[7]);

  load_and_transpose_s32_8x8(input + 8, 16, &in[8], &in[9], &in[10], &in[11],
                             &in[12], &in[13], &in[14], &in[15]);
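
  // in[0] through in[15] hold the 16 non-zero inputs for this strip; inputs
  // 16 through 31 are zero in the _135_ variant and are skipped below.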

  // stage 1
  s1[16] = multiply_shift_and_narrow_s32_dual(in[1], cospi_31_64);
  s1[31] = multiply_shift_and_narrow_s32_dual(in[1], cospi_1_64);

  s1[17] = multiply_shift_and_narrow_s32_dual(in[15], -cospi_17_64);
  s1[30] = multiply_shift_and_narrow_s32_dual(in[15], cospi_15_64);

  s1[18] = multiply_shift_and_narrow_s32_dual(in[9], cospi_23_64);
  s1[29] = multiply_shift_and_narrow_s32_dual(in[9], cospi_9_64);

  s1[19] = multiply_shift_and_narrow_s32_dual(in[7], -cospi_25_64);
  s1[28] = multiply_shift_and_narrow_s32_dual(in[7], cospi_7_64);

  s1[20] = multiply_shift_and_narrow_s32_dual(in[5], cospi_27_64);
  s1[27] = multiply_shift_and_narrow_s32_dual(in[5], cospi_5_64);

  s1[21] = multiply_shift_and_narrow_s32_dual(in[11], -cospi_21_64);
  s1[26] = multiply_shift_and_narrow_s32_dual(in[11], cospi_11_64);

  s1[22] = multiply_shift_and_narrow_s32_dual(in[13], cospi_19_64);
  s1[25] = multiply_shift_and_narrow_s32_dual(in[13], cospi_13_64);

  s1[23] = multiply_shift_and_narrow_s32_dual(in[3], -cospi_29_64);
  s1[24] = multiply_shift_and_narrow_s32_dual(in[3], cospi_3_64);

  // stage 2
  s2[8] = multiply_shift_and_narrow_s32_dual(in[2], cospi_30_64);
  s2[15] = multiply_shift_and_narrow_s32_dual(in[2], cospi_2_64);

  s2[9] = multiply_shift_and_narrow_s32_dual(in[14], -cospi_18_64);
  s2[14] = multiply_shift_and_narrow_s32_dual(in[14], cospi_14_64);

  s2[10] = multiply_shift_and_narrow_s32_dual(in[10], cospi_22_64);
  s2[13] = multiply_shift_and_narrow_s32_dual(in[10], cospi_10_64);

  s2[11] = multiply_shift_and_narrow_s32_dual(in[6], -cospi_26_64);
  s2[12] = multiply_shift_and_narrow_s32_dual(in[6], cospi_6_64);

  s2[16] = highbd_idct_add_dual(s1[16], s1[17]);
  s2[17] = highbd_idct_sub_dual(s1[16], s1[17]);
  s2[18] = highbd_idct_sub_dual(s1[19], s1[18]);
  s2[19] = highbd_idct_add_dual(s1[18], s1[19]);
  s2[20] = highbd_idct_add_dual(s1[20], s1[21]);
  s2[21] = highbd_idct_sub_dual(s1[20], s1[21]);
  s2[22] = highbd_idct_sub_dual(s1[23], s1[22]);
  s2[23] = highbd_idct_add_dual(s1[22], s1[23]);
  s2[24] = highbd_idct_add_dual(s1[24], s1[25]);
  s2[25] = highbd_idct_sub_dual(s1[24], s1[25]);
  s2[26] = highbd_idct_sub_dual(s1[27], s1[26]);
  s2[27] = highbd_idct_add_dual(s1[26], s1[27]);
  s2[28] = highbd_idct_add_dual(s1[28], s1[29]);
  s2[29] = highbd_idct_sub_dual(s1[28], s1[29]);
  s2[30] = highbd_idct_sub_dual(s1[31], s1[30]);
  s2[31] = highbd_idct_add_dual(s1[30], s1[31]);

  // stage 3
  s3[4] = multiply_shift_and_narrow_s32_dual(in[4], cospi_28_64);
  s3[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);

  s3[5] = multiply_shift_and_narrow_s32_dual(in[12], -cospi_20_64);
  s3[6] = multiply_shift_and_narrow_s32_dual(in[12], cospi_12_64);

  s3[8] = highbd_idct_add_dual(s2[8], s2[9]);
  s3[9] = highbd_idct_sub_dual(s2[8], s2[9]);
  s3[10] = highbd_idct_sub_dual(s2[11], s2[10]);
  s3[11] = highbd_idct_add_dual(s2[10], s2[11]);
  s3[12] = highbd_idct_add_dual(s2[12], s2[13]);
  s3[13] = highbd_idct_sub_dual(s2[12], s2[13]);
  s3[14] = highbd_idct_sub_dual(s2[15], s2[14]);
  s3[15] = highbd_idct_add_dual(s2[14], s2[15]);

  s3[17] = multiply_accumulate_shift_and_narrow_s32_dual(s2[17], -cospi_4_64,
                                                         s2[30], cospi_28_64);
  s3[30] = multiply_accumulate_shift_and_narrow_s32_dual(s2[17], cospi_28_64,
                                                         s2[30], cospi_4_64);

  s3[18] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_28_64,
                                                         s2[29], -cospi_4_64);
  s3[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_4_64,
                                                         s2[29], cospi_28_64);

  s3[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_20_64,
                                                         s2[26], cospi_12_64);
  s3[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], cospi_12_64,
                                                         s2[26], cospi_20_64);

  s3[22] = multiply_accumulate_shift_and_narrow_s32_dual(s2[22], -cospi_12_64,
                                                         s2[25], -cospi_20_64);
  s3[25] = multiply_accumulate_shift_and_narrow_s32_dual(s2[22], -cospi_20_64,
                                                         s2[25], cospi_12_64);

  // stage 4
  s4[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);
  s4[2] = multiply_shift_and_narrow_s32_dual(in[8], cospi_24_64);
  s4[3] = multiply_shift_and_narrow_s32_dual(in[8], cospi_8_64);

  s4[4] = highbd_idct_add_dual(s3[4], s3[5]);
  s4[5] = highbd_idct_sub_dual(s3[4], s3[5]);
  s4[6] = highbd_idct_sub_dual(s3[7], s3[6]);
  s4[7] = highbd_idct_add_dual(s3[6], s3[7]);

  s4[9] = multiply_accumulate_shift_and_narrow_s32_dual(s3[9], -cospi_8_64,
                                                        s3[14], cospi_24_64);
  s4[14] = multiply_accumulate_shift_and_narrow_s32_dual(s3[9], cospi_24_64,
                                                         s3[14], cospi_8_64);

  s4[10] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_24_64,
                                                         s3[13], -cospi_8_64);
  s4[13] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_8_64,
                                                         s3[13], cospi_24_64);

  s4[16] = highbd_idct_add_dual(s2[16], s2[19]);
  s4[17] = highbd_idct_add_dual(s3[17], s3[18]);
  s4[18] = highbd_idct_sub_dual(s3[17], s3[18]);
  s4[19] = highbd_idct_sub_dual(s2[16], s2[19]);
  s4[20] = highbd_idct_sub_dual(s2[23], s2[20]);
  s4[21] = highbd_idct_sub_dual(s3[22], s3[21]);
  s4[22] = highbd_idct_add_dual(s3[21], s3[22]);
  s4[23] = highbd_idct_add_dual(s2[20], s2[23]);
  s4[24] = highbd_idct_add_dual(s2[24], s2[27]);
  s4[25] = highbd_idct_add_dual(s3[25], s3[26]);
  s4[26] = highbd_idct_sub_dual(s3[25], s3[26]);
  s4[27] = highbd_idct_sub_dual(s2[24], s2[27]);
  s4[28] = highbd_idct_sub_dual(s2[31], s2[28]);
  s4[29] = highbd_idct_sub_dual(s3[30], s3[29]);
  s4[30] = highbd_idct_add_dual(s3[29], s3[30]);
  s4[31] = highbd_idct_add_dual(s2[28], s2[31]);

  // stage 5
  s5[0] = highbd_idct_add_dual(s4[0], s4[3]);
  s5[1] = highbd_idct_add_dual(s4[0], s4[2]);
  s5[2] = highbd_idct_sub_dual(s4[0], s4[2]);
  s5[3] = highbd_idct_sub_dual(s4[0], s4[3]);

  s5[5] = sub_multiply_shift_and_narrow_s32_dual(s4[6], s4[5], cospi_16_64);
  s5[6] = add_multiply_shift_and_narrow_s32_dual(s4[5], s4[6], cospi_16_64);

  s5[8] = highbd_idct_add_dual(s3[8], s3[11]);
  s5[9] = highbd_idct_add_dual(s4[9], s4[10]);
  s5[10] = highbd_idct_sub_dual(s4[9], s4[10]);
  s5[11] = highbd_idct_sub_dual(s3[8], s3[11]);
  s5[12] = highbd_idct_sub_dual(s3[15], s3[12]);
  s5[13] = highbd_idct_sub_dual(s4[14], s4[13]);
  s5[14] = highbd_idct_add_dual(s4[13], s4[14]);
  s5[15] = highbd_idct_add_dual(s3[15], s3[12]);

  s5[18] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], -cospi_8_64,
                                                         s4[29], cospi_24_64);
  s5[29] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], cospi_24_64,
                                                         s4[29], cospi_8_64);

  s5[19] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], -cospi_8_64,
                                                         s4[28], cospi_24_64);
  s5[28] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], cospi_24_64,
                                                         s4[28], cospi_8_64);

  s5[20] = multiply_accumulate_shift_and_narrow_s32_dual(s4[20], -cospi_24_64,
                                                         s4[27], -cospi_8_64);
  s5[27] = multiply_accumulate_shift_and_narrow_s32_dual(s4[20], -cospi_8_64,
                                                         s4[27], cospi_24_64);

  s5[21] = multiply_accumulate_shift_and_narrow_s32_dual(s4[21], -cospi_24_64,
                                                         s4[26], -cospi_8_64);
  s5[26] = multiply_accumulate_shift_and_narrow_s32_dual(s4[21], -cospi_8_64,
                                                         s4[26], cospi_24_64);

  // stage 6
  s6[0] = highbd_idct_add_dual(s5[0], s4[7]);
  s6[1] = highbd_idct_add_dual(s5[1], s5[6]);
  s6[2] = highbd_idct_add_dual(s5[2], s5[5]);
  s6[3] = highbd_idct_add_dual(s5[3], s4[4]);
  s6[4] = highbd_idct_sub_dual(s5[3], s4[4]);
  s6[5] = highbd_idct_sub_dual(s5[2], s5[5]);
  s6[6] = highbd_idct_sub_dual(s5[1], s5[6]);
  s6[7] = highbd_idct_sub_dual(s5[0], s4[7]);

  s6[10] = sub_multiply_shift_and_narrow_s32_dual(s5[13], s5[10], cospi_16_64);
  s6[13] = add_multiply_shift_and_narrow_s32_dual(s5[10], s5[13], cospi_16_64);

  s6[11] = sub_multiply_shift_and_narrow_s32_dual(s5[12], s5[11], cospi_16_64);
  s6[12] = add_multiply_shift_and_narrow_s32_dual(s5[11], s5[12], cospi_16_64);

  s6[16] = highbd_idct_add_dual(s4[16], s4[23]);
  s6[17] = highbd_idct_add_dual(s4[17], s4[22]);
  s6[18] = highbd_idct_add_dual(s5[18], s5[21]);
  s6[19] = highbd_idct_add_dual(s5[19], s5[20]);
  s6[20] = highbd_idct_sub_dual(s5[19], s5[20]);
  s6[21] = highbd_idct_sub_dual(s5[18], s5[21]);
  s6[22] = highbd_idct_sub_dual(s4[17], s4[22]);
  s6[23] = highbd_idct_sub_dual(s4[16], s4[23]);
  s6[24] = highbd_idct_sub_dual(s4[31], s4[24]);
  s6[25] = highbd_idct_sub_dual(s4[30], s4[25]);
  s6[26] = highbd_idct_sub_dual(s5[29], s5[26]);
  s6[27] = highbd_idct_sub_dual(s5[28], s5[27]);
  s6[28] = highbd_idct_add_dual(s5[27], s5[28]);
  s6[29] = highbd_idct_add_dual(s5[26], s5[29]);
  s6[30] = highbd_idct_add_dual(s4[25], s4[30]);
  s6[31] = highbd_idct_add_dual(s4[24], s4[31]);

  // stage 7
  s7[0] = highbd_idct_add_dual(s6[0], s5[15]);
  s7[1] = highbd_idct_add_dual(s6[1], s5[14]);
  s7[2] = highbd_idct_add_dual(s6[2], s6[13]);
  s7[3] = highbd_idct_add_dual(s6[3], s6[12]);
  s7[4] = highbd_idct_add_dual(s6[4], s6[11]);
  s7[5] = highbd_idct_add_dual(s6[5], s6[10]);
  s7[6] = highbd_idct_add_dual(s6[6], s5[9]);
  s7[7] = highbd_idct_add_dual(s6[7], s5[8]);
  s7[8] = highbd_idct_sub_dual(s6[7], s5[8]);
  s7[9] = highbd_idct_sub_dual(s6[6], s5[9]);
  s7[10] = highbd_idct_sub_dual(s6[5], s6[10]);
  s7[11] = highbd_idct_sub_dual(s6[4], s6[11]);
  s7[12] = highbd_idct_sub_dual(s6[3], s6[12]);
  s7[13] = highbd_idct_sub_dual(s6[2], s6[13]);
  s7[14] = highbd_idct_sub_dual(s6[1], s5[14]);
  s7[15] = highbd_idct_sub_dual(s6[0], s5[15]);

  s7[20] = sub_multiply_shift_and_narrow_s32_dual(s6[27], s6[20], cospi_16_64);
  s7[27] = add_multiply_shift_and_narrow_s32_dual(s6[20], s6[27], cospi_16_64);

  s7[21] = sub_multiply_shift_and_narrow_s32_dual(s6[26], s6[21], cospi_16_64);
  s7[26] = add_multiply_shift_and_narrow_s32_dual(s6[21], s6[26], cospi_16_64);

  s7[22] = sub_multiply_shift_and_narrow_s32_dual(s6[25], s6[22], cospi_16_64);
  s7[25] = add_multiply_shift_and_narrow_s32_dual(s6[22], s6[25], cospi_16_64);

  s7[23] = sub_multiply_shift_and_narrow_s32_dual(s6[24], s6[23], cospi_16_64);
  s7[24] = add_multiply_shift_and_narrow_s32_dual(s6[23], s6[24], cospi_16_64);

  // final stage
  out[0] = highbd_idct_add_dual(s7[0], s6[31]);
  out[1] = highbd_idct_add_dual(s7[1], s6[30]);
  out[2] = highbd_idct_add_dual(s7[2], s6[29]);
  out[3] = highbd_idct_add_dual(s7[3], s6[28]);
  out[4] = highbd_idct_add_dual(s7[4], s7[27]);
  out[5] = highbd_idct_add_dual(s7[5], s7[26]);
  out[6] = highbd_idct_add_dual(s7[6], s7[25]);
  out[7] = highbd_idct_add_dual(s7[7], s7[24]);
  out[8] = highbd_idct_add_dual(s7[8], s7[23]);
  out[9] = highbd_idct_add_dual(s7[9], s7[22]);
  out[10] = highbd_idct_add_dual(s7[10], s7[21]);
  out[11] = highbd_idct_add_dual(s7[11], s7[20]);
  out[12] = highbd_idct_add_dual(s7[12], s6[19]);
  out[13] = highbd_idct_add_dual(s7[13], s6[18]);
  out[14] = highbd_idct_add_dual(s7[14], s6[17]);
  out[15] = highbd_idct_add_dual(s7[15], s6[16]);
  out[16] = highbd_idct_sub_dual(s7[15], s6[16]);
  out[17] = highbd_idct_sub_dual(s7[14], s6[17]);
  out[18] = highbd_idct_sub_dual(s7[13], s6[18]);
  out[19] = highbd_idct_sub_dual(s7[12], s6[19]);
  out[20] = highbd_idct_sub_dual(s7[11], s7[20]);
  out[21] = highbd_idct_sub_dual(s7[10], s7[21]);
  out[22] = highbd_idct_sub_dual(s7[9], s7[22]);
  out[23] = highbd_idct_sub_dual(s7[8], s7[23]);
  out[24] = highbd_idct_sub_dual(s7[7], s7[24]);
  out[25] = highbd_idct_sub_dual(s7[6], s7[25]);
  out[26] = highbd_idct_sub_dual(s7[5], s7[26]);
  out[27] = highbd_idct_sub_dual(s7[4], s7[27]);
  out[28] = highbd_idct_sub_dual(s7[3], s6[28]);
  out[29] = highbd_idct_sub_dual(s7[2], s6[29]);
  out[30] = highbd_idct_sub_dual(s7[1], s6[30]);
  out[31] = highbd_idct_sub_dual(s7[0], s6[31]);

  highbd_idct16x16_add_store(out, output, stride, bd);
  highbd_idct16x16_add_store(out + 16, output + 16 * stride, stride, bd);
}

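// Two-pass implementation: the first pass transforms the rows and writes the
// transposed results to a 32x16 intermediate buffer; the second pass
// transforms the columns and adds the final, clamped values to dest, eight
// columns per iteration.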
void vpx_highbd_idct32x32_135_add_neon(const tran_low_t *input, uint16_t *dest,
                                       int stride, int bd) {
  int i;

  if (bd == 8) {
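    // 8-bit content: the intermediate values fit in 16 bits, so the faster
    // int16_t path can be used.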
    int16_t temp[32 * 16];
    int16_t *t = temp;
    vpx_idct32_12_neon(input, temp);
    vpx_idct32_12_neon(input + 32 * 8, temp + 8);

    for (i = 0; i < 32; i += 8) {
      vpx_idct32_16_neon(t, dest, stride, 1);
      t += (16 * 8);
      dest += 8;
    }
  } else {
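    // 10-bit and 12-bit content: keep 32-bit precision through both passes.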
    int32_t temp[32 * 16];
    int32_t *t = temp;
    vpx_highbd_idct32_12_neon(input, temp);
    vpx_highbd_idct32_12_neon(input + 32 * 8, temp + 8);

    for (i = 0; i < 32; i += 8) {
      vpx_highbd_idct32_16_neon(t, dest, stride, bd);
      t += (16 * 8);
      dest += 8;
    }
  }
}