/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/arm/fdct16x16_neon.h"
// Some builds of gcc 4.9.2 and 4.9.3 have trouble with some of the inline
// functions.
#if !defined(__clang__) && !defined(__ANDROID__) && defined(__GNUC__) && \
    __GNUC__ == 4 && __GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ < 4

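// For the affected compiler versions, fall back to the plain C implementation.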
void vpx_fdct16x16_neon(const int16_t *input, tran_low_t *output, int stride) {
  vpx_fdct16x16_c(input, output, stride);
}

#else

// Main body of fdct16x16.
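// vpx_fdct8x16_body() transforms one 8-column slice: in[] and out[] each hold
// 16 rows of 8 lanes. in[0..7] carry the pairwise sums and in[8..15] the
// pairwise differences prepared by load_cross()/cross_input(); the even
// outputs come from an 8-point fdct of in[0..7] and the odd outputs from
// in[8..15]. out must be a separate array, since the even-indexed outputs are
// written before in[8..15] are read.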
static void vpx_fdct8x16_body(const int16x8_t *in /*[16]*/,
                              int16x8_t *out /*[16]*/) {
  int16x8_t s[8];
  int16x8_t x[4];
  int16x8_t step[8];

  // stage 1
  // From fwd_txfm.c: "Work on the first eight values; fdct8(input,
  // even_results);"
  s[0] = vaddq_s16(in[0], in[7]);
  s[1] = vaddq_s16(in[1], in[6]);
  s[2] = vaddq_s16(in[2], in[5]);
  s[3] = vaddq_s16(in[3], in[4]);
  s[4] = vsubq_s16(in[3], in[4]);
  s[5] = vsubq_s16(in[2], in[5]);
  s[6] = vsubq_s16(in[1], in[6]);
  s[7] = vsubq_s16(in[0], in[7]);

  // fdct4(step, step);
  x[0] = vaddq_s16(s[0], s[3]);
  x[1] = vaddq_s16(s[1], s[2]);
  x[2] = vsubq_s16(s[1], s[2]);
  x[3] = vsubq_s16(s[0], s[3]);

  // out[0] = fdct_round_shift((x0 + x1) * cospi_16_64)
  // out[8] = fdct_round_shift((x0 - x1) * cospi_16_64)
  butterfly_one_coeff_s16_s32_fast_narrow(x[0], x[1], cospi_16_64, &out[0],
                                          &out[8]);
  // out[4] = fdct_round_shift(x3 * cospi_8_64 + x2 * cospi_24_64);
  // out[12] = fdct_round_shift(x3 * cospi_24_64 - x2 * cospi_8_64);
  butterfly_two_coeff(x[3], x[2], cospi_8_64, cospi_24_64, &out[4], &out[12]);

  // Stage 2
  // Re-using source s5/s6
  // s5 = fdct_round_shift((s6 - s5) * cospi_16_64)
  // s6 = fdct_round_shift((s6 + s5) * cospi_16_64)
  butterfly_one_coeff_s16_fast(s[6], s[5], cospi_16_64, &s[6], &s[5]);

  // Stage 3
  x[0] = vaddq_s16(s[4], s[5]);
  x[1] = vsubq_s16(s[4], s[5]);
  x[2] = vsubq_s16(s[7], s[6]);
  x[3] = vaddq_s16(s[7], s[6]);

  // Stage 4
  // out[2] = fdct_round_shift(x3 * cospi_4_64 + x0 * cospi_28_64)
  // out[14] = fdct_round_shift(x3 * cospi_28_64 - x0 * cospi_4_64)
  butterfly_two_coeff(x[3], x[0], cospi_4_64, cospi_28_64, &out[2], &out[14]);
  // out[6] = fdct_round_shift(x2 * cospi_20_64 + x1 * cospi_12_64)
  // out[10] = fdct_round_shift(x2 * cospi_12_64 - x1 * cospi_20_64)
  butterfly_two_coeff(x[2], x[1], cospi_20_64, cospi_12_64, &out[10], &out[6]);
  // step 2
  // From fwd_txfm.c: "Work on the next eight values; step1 -> odd_results"
  // That file distinguishes between "in_high" and "step1", but the only
  // difference is that "in_high" is the first 8 values and "step1" is the
  // second. Here, since they are all in one array, "step1" values are += 8.

  // step2[2] = fdct_round_shift((step1[5] - step1[2]) * cospi_16_64)
  // step2[3] = fdct_round_shift((step1[4] - step1[3]) * cospi_16_64)
  // step2[4] = fdct_round_shift((step1[4] + step1[3]) * cospi_16_64)
  // step2[5] = fdct_round_shift((step1[5] + step1[2]) * cospi_16_64)
  butterfly_one_coeff_s16_fast(in[13], in[10], cospi_16_64, &s[5], &s[2]);
  butterfly_one_coeff_s16_fast(in[12], in[11], cospi_16_64, &s[4], &s[3]);

  // step 3
  s[0] = vaddq_s16(in[8], s[3]);
  s[1] = vaddq_s16(in[9], s[2]);
  x[0] = vsubq_s16(in[9], s[2]);
  x[1] = vsubq_s16(in[8], s[3]);
  x[2] = vsubq_s16(in[15], s[4]);
  x[3] = vsubq_s16(in[14], s[5]);
  s[6] = vaddq_s16(in[14], s[5]);
  s[7] = vaddq_s16(in[15], s[4]);

  // step 4
  // step2[6] = fdct_round_shift(step3[6] * cospi_8_64 + step3[1] * cospi_24_64)
  // step2[1] = fdct_round_shift(step3[6] * cospi_24_64 - step3[1] * cospi_8_64)
  butterfly_two_coeff(s[6], s[1], cospi_8_64, cospi_24_64, &s[6], &s[1]);

  // step2[2] = fdct_round_shift(step3[2] * cospi_24_64 + step3[5] * cospi_8_64)
  // step2[5] = fdct_round_shift(step3[2] * cospi_8_64 - step3[5] * cospi_24_64)
  butterfly_two_coeff(x[0], x[3], cospi_24_64, cospi_8_64, &s[2], &s[5]);

  // step 5
  step[0] = vaddq_s16(s[0], s[1]);
  step[1] = vsubq_s16(s[0], s[1]);
  step[2] = vaddq_s16(x[1], s[2]);
  step[3] = vsubq_s16(x[1], s[2]);
  step[4] = vsubq_s16(x[2], s[5]);
  step[5] = vaddq_s16(x[2], s[5]);
  step[6] = vsubq_s16(s[7], s[6]);
  step[7] = vaddq_s16(s[7], s[6]);

  // step 6
  // out[9] = fdct_round_shift(step1[6] * cospi_18_64 + step1[1] * cospi_14_64)
  // out[7] = fdct_round_shift(step1[6] * cospi_14_64 - step1[1] * cospi_18_64)
  butterfly_two_coeff(step[6], step[1], cospi_18_64, cospi_14_64, &out[9],
                      &out[7]);
  // out[1] = fdct_round_shift(step1[7] * cospi_2_64 + step1[0] * cospi_30_64)
  // out[15] = fdct_round_shift(step1[7] * cospi_30_64 - step1[0] * cospi_2_64)
  butterfly_two_coeff(step[7], step[0], cospi_2_64, cospi_30_64, &out[1],
                      &out[15]);

  // out[13] = fdct_round_shift(step1[4] * cospi_26_64 + step1[3] * cospi_6_64)
  // out[3] = fdct_round_shift(step1[4] * cospi_6_64 - step1[3] * cospi_26_64)
  butterfly_two_coeff(step[4], step[3], cospi_26_64, cospi_6_64, &out[13],
                      &out[3]);

  // out[5] = fdct_round_shift(step1[5] * cospi_10_64 + step1[2] * cospi_22_64)
  // out[11] = fdct_round_shift(step1[5] * cospi_22_64 - step1[2] * cospi_10_64)
  butterfly_two_coeff(step[5], step[2], cospi_10_64, cospi_22_64, &out[5],
                      &out[11]);
}
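// Full 16x16 forward DCT. Pass 1 transforms the left and right 8-column
// halves down the columns; the resulting 8x8 quarters are then transposed
// into a contiguous layout and partially rounded, and pass 2 transforms what
// are now the rows before the final transpose and store.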
void vpx_fdct16x16_neon(const int16_t *input, tran_low_t *output, int stride) {
  int16x8_t temp0[16];
  int16x8_t temp1[16];
  int16x8_t temp2[16];
  int16x8_t temp3[16];

  // Left half.
  load_cross(input, stride, temp0);
  scale_input(temp0, temp1);
  vpx_fdct8x16_body(temp1, temp0);

  // Right half.
  load_cross(input + 8, stride, temp1);
  scale_input(temp1, temp2);
  vpx_fdct8x16_body(temp2, temp1);

  // Transpose the top-left and top-right quarters into one contiguous
  // location so they can be processed as the top half.

  transpose_s16_8x8q(&temp0[0], &temp2[0]);
  transpose_s16_8x8q(&temp1[0], &temp2[8]);
  partial_round_shift(temp2);
  cross_input(temp2, temp3);
  vpx_fdct8x16_body(temp3, temp2);
  transpose_s16_8x8(&temp2[0], &temp2[1], &temp2[2], &temp2[3], &temp2[4],
                    &temp2[5], &temp2[6], &temp2[7]);
  transpose_s16_8x8(&temp2[8], &temp2[9], &temp2[10], &temp2[11], &temp2[12],
                    &temp2[13], &temp2[14], &temp2[15]);
  store(output, temp2);
  store(output + 8, temp2 + 8);
  output += 8 * 16;

  // Transpose the bottom-left and bottom-right quarters into one contiguous
  // location so they can be processed as the bottom half.
  transpose_s16_8x8q(&temp0[8], &temp1[0]);

  transpose_s16_8x8(&temp1[8], &temp1[9], &temp1[10], &temp1[11], &temp1[12],
                    &temp1[13], &temp1[14], &temp1[15]);
  partial_round_shift(temp1);
  cross_input(temp1, temp0);
  vpx_fdct8x16_body(temp0, temp1);
  transpose_s16_8x8(&temp1[0], &temp1[1], &temp1[2], &temp1[3], &temp1[4],
                    &temp1[5], &temp1[6], &temp1[7]);
  transpose_s16_8x8(&temp1[8], &temp1[9], &temp1[10], &temp1[11], &temp1[12],
                    &temp1[13], &temp1[14], &temp1[15]);
  store(output, temp1);
  store(output + 8, temp1 + 8);
}

#if CONFIG_VP9_HIGHBITDEPTH

// Main body of fdct8x16 column
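// Same flow as vpx_fdct8x16_body() above, but every 8-wide row is carried as
// two int32x4_t vectors (left/right) so the wider intermediate values keep
// full precision. The transform is done in place on left[]/right[], which is
// why rows 8-15 are copied to locals before being overwritten.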
static void vpx_highbd_fdct8x16_body(int32x4_t *left /*[16]*/,
                                     int32x4_t *right /* [16] */) {
  int32x4_t sl[8];
  int32x4_t sr[8];
  int32x4_t xl[4];
  int32x4_t xr[4];
  int32x4_t inl[8];
  int32x4_t inr[8];
  int32x4_t stepl[8];
  int32x4_t stepr[8];

  // stage 1
  // From fwd_txfm.c: "Work on the first eight values; fdct8(input,
  // even_results);"
  sl[0] = vaddq_s32(left[0], left[7]);
  sr[0] = vaddq_s32(right[0], right[7]);
  sl[1] = vaddq_s32(left[1], left[6]);
  sr[1] = vaddq_s32(right[1], right[6]);
  sl[2] = vaddq_s32(left[2], left[5]);
  sr[2] = vaddq_s32(right[2], right[5]);
  sl[3] = vaddq_s32(left[3], left[4]);
  sr[3] = vaddq_s32(right[3], right[4]);
  sl[4] = vsubq_s32(left[3], left[4]);
  sr[4] = vsubq_s32(right[3], right[4]);
  sl[5] = vsubq_s32(left[2], left[5]);
  sr[5] = vsubq_s32(right[2], right[5]);
  sl[6] = vsubq_s32(left[1], left[6]);
  sr[6] = vsubq_s32(right[1], right[6]);
  sl[7] = vsubq_s32(left[0], left[7]);
  sr[7] = vsubq_s32(right[0], right[7]);

  // Copy values 8-15 as we're storing in-place
  inl[0] = left[8];
  inr[0] = right[8];
  inl[1] = left[9];
  inr[1] = right[9];
  inl[2] = left[10];
  inr[2] = right[10];
  inl[3] = left[11];
  inr[3] = right[11];
  inl[4] = left[12];
  inr[4] = right[12];
  inl[5] = left[13];
  inr[5] = right[13];
  inl[6] = left[14];
  inr[6] = right[14];
  inl[7] = left[15];
  inr[7] = right[15];

  // fdct4(step, step);
  xl[0] = vaddq_s32(sl[0], sl[3]);
  xr[0] = vaddq_s32(sr[0], sr[3]);
  xl[1] = vaddq_s32(sl[1], sl[2]);
  xr[1] = vaddq_s32(sr[1], sr[2]);
  xl[2] = vsubq_s32(sl[1], sl[2]);
  xr[2] = vsubq_s32(sr[1], sr[2]);
  xl[3] = vsubq_s32(sl[0], sl[3]);
  xr[3] = vsubq_s32(sr[0], sr[3]);

  // out[0] = fdct_round_shift((x0 + x1) * cospi_16_64)
  // out[8] = fdct_round_shift((x0 - x1) * cospi_16_64)
  butterfly_one_coeff_s32_fast(xl[0], xr[0], xl[1], xr[1], cospi_16_64,
                               &left[0], &right[0], &left[8], &right[8]);

  // out[4] = fdct_round_shift(x3 * cospi_8_64 + x2 * cospi_24_64);
  // out[12] = fdct_round_shift(x3 * cospi_24_64 - x2 * cospi_8_64);
  butterfly_two_coeff_s32_s64_narrow(xl[3], xr[3], xl[2], xr[2], cospi_8_64,
                                     cospi_24_64, &left[4], &right[4],
                                     &left[12], &right[12]);

  // Stage 2
  // Re-using source s5/s6
  // s5 = fdct_round_shift((s6 - s5) * cospi_16_64)
  // s6 = fdct_round_shift((s6 + s5) * cospi_16_64)
  butterfly_one_coeff_s32_fast(sl[6], sr[6], sl[5], sr[5], cospi_16_64, &sl[6],
                               &sr[6], &sl[5], &sr[5]);

  // Stage 3
  xl[0] = vaddq_s32(sl[4], sl[5]);
  xr[0] = vaddq_s32(sr[4], sr[5]);
  xl[1] = vsubq_s32(sl[4], sl[5]);
  xr[1] = vsubq_s32(sr[4], sr[5]);
  xl[2] = vsubq_s32(sl[7], sl[6]);
  xr[2] = vsubq_s32(sr[7], sr[6]);
  xl[3] = vaddq_s32(sl[7], sl[6]);
  xr[3] = vaddq_s32(sr[7], sr[6]);

  // Stage 4
  // out[2] = fdct_round_shift(x3 * cospi_4_64 + x0 * cospi_28_64)
  // out[14] = fdct_round_shift(x3 * cospi_28_64 - x0 * cospi_4_64)
  butterfly_two_coeff_s32_s64_narrow(xl[3], xr[3], xl[0], xr[0], cospi_4_64,
                                     cospi_28_64, &left[2], &right[2],
                                     &left[14], &right[14]);
  // out[6] = fdct_round_shift(x2 * cospi_20_64 + x1 * cospi_12_64)
  // out[10] = fdct_round_shift(x2 * cospi_12_64 - x1 * cospi_20_64)
  butterfly_two_coeff_s32_s64_narrow(xl[2], xr[2], xl[1], xr[1], cospi_20_64,
                                     cospi_12_64, &left[10], &right[10],
                                     &left[6], &right[6]);

  // step 2
  // From fwd_txfm.c: "Work on the next eight values; step1 -> odd_results"
  // That file distinguishes between "in_high" and "step1", but the only
  // difference is that "in_high" is the first 8 values and "step1" is the
  // second. Here, since they are all in one array, "step1" values are += 8.

  // step2[2] = fdct_round_shift((step1[5] - step1[2]) * cospi_16_64)
  // step2[3] = fdct_round_shift((step1[4] - step1[3]) * cospi_16_64)
  // step2[4] = fdct_round_shift((step1[4] + step1[3]) * cospi_16_64)
  // step2[5] = fdct_round_shift((step1[5] + step1[2]) * cospi_16_64)
  butterfly_one_coeff_s32_fast(inl[5], inr[5], inl[2], inr[2], cospi_16_64,
                               &sl[5], &sr[5], &sl[2], &sr[2]);
  butterfly_one_coeff_s32_fast(inl[4], inr[4], inl[3], inr[3], cospi_16_64,
                               &sl[4], &sr[4], &sl[3], &sr[3]);

  // step 3
  sl[0] = vaddq_s32(inl[0], sl[3]);
  sr[0] = vaddq_s32(inr[0], sr[3]);
  sl[1] = vaddq_s32(inl[1], sl[2]);
  sr[1] = vaddq_s32(inr[1], sr[2]);
  xl[0] = vsubq_s32(inl[1], sl[2]);
  xr[0] = vsubq_s32(inr[1], sr[2]);
  xl[1] = vsubq_s32(inl[0], sl[3]);
  xr[1] = vsubq_s32(inr[0], sr[3]);
  xl[2] = vsubq_s32(inl[7], sl[4]);
  xr[2] = vsubq_s32(inr[7], sr[4]);
  xl[3] = vsubq_s32(inl[6], sl[5]);
  xr[3] = vsubq_s32(inr[6], sr[5]);
  sl[6] = vaddq_s32(inl[6], sl[5]);
  sr[6] = vaddq_s32(inr[6], sr[5]);
  sl[7] = vaddq_s32(inl[7], sl[4]);
  sr[7] = vaddq_s32(inr[7], sr[4]);

  // step 4
  // step2[6] = fdct_round_shift(step3[6] * cospi_8_64 + step3[1] * cospi_24_64)
  // step2[1] = fdct_round_shift(step3[6] * cospi_24_64 - step3[1] * cospi_8_64)
  butterfly_two_coeff_s32_s64_narrow(sl[6], sr[6], sl[1], sr[1], cospi_8_64,
                                     cospi_24_64, &sl[6], &sr[6], &sl[1],
                                     &sr[1]);
  // step2[2] = fdct_round_shift(step3[2] * cospi_24_64 + step3[5] * cospi_8_64)
  // step2[5] = fdct_round_shift(step3[2] * cospi_8_64 - step3[5] * cospi_24_64)
  butterfly_two_coeff_s32_s64_narrow(xl[0], xr[0], xl[3], xr[3], cospi_24_64,
                                     cospi_8_64, &sl[2], &sr[2], &sl[5],
                                     &sr[5]);

  // step 5
  stepl[0] = vaddq_s32(sl[0], sl[1]);
  stepr[0] = vaddq_s32(sr[0], sr[1]);
  stepl[1] = vsubq_s32(sl[0], sl[1]);
  stepr[1] = vsubq_s32(sr[0], sr[1]);
  stepl[2] = vaddq_s32(xl[1], sl[2]);
  stepr[2] = vaddq_s32(xr[1], sr[2]);
  stepl[3] = vsubq_s32(xl[1], sl[2]);
  stepr[3] = vsubq_s32(xr[1], sr[2]);
  stepl[4] = vsubq_s32(xl[2], sl[5]);
  stepr[4] = vsubq_s32(xr[2], sr[5]);
  stepl[5] = vaddq_s32(xl[2], sl[5]);
  stepr[5] = vaddq_s32(xr[2], sr[5]);
  stepl[6] = vsubq_s32(sl[7], sl[6]);
  stepr[6] = vsubq_s32(sr[7], sr[6]);
  stepl[7] = vaddq_s32(sl[7], sl[6]);
  stepr[7] = vaddq_s32(sr[7], sr[6]);

  // step 6
  // out[9] = fdct_round_shift(step1[6] * cospi_18_64 + step1[1] * cospi_14_64)
  // out[7] = fdct_round_shift(step1[6] * cospi_14_64 - step1[1] * cospi_18_64)
  butterfly_two_coeff_s32_s64_narrow(stepl[6], stepr[6], stepl[1], stepr[1],
                                     cospi_18_64, cospi_14_64, &left[9],
                                     &right[9], &left[7], &right[7]);
  // out[1] = fdct_round_shift(step1[7] * cospi_2_64 + step1[0] * cospi_30_64)
  // out[15] = fdct_round_shift(step1[7] * cospi_30_64 - step1[0] * cospi_2_64)
  butterfly_two_coeff_s32_s64_narrow(stepl[7], stepr[7], stepl[0], stepr[0],
                                     cospi_2_64, cospi_30_64, &left[1],
                                     &right[1], &left[15], &right[15]);
  // out[13] = fdct_round_shift(step1[4] * cospi_26_64 + step1[3] * cospi_6_64)
  // out[3] = fdct_round_shift(step1[4] * cospi_6_64 - step1[3] * cospi_26_64)
  butterfly_two_coeff_s32_s64_narrow(stepl[4], stepr[4], stepl[3], stepr[3],
                                     cospi_26_64, cospi_6_64, &left[13],
                                     &right[13], &left[3], &right[3]);
  // out[5] = fdct_round_shift(step1[5] * cospi_10_64 + step1[2] * cospi_22_64)
  // out[11] = fdct_round_shift(step1[5] * cospi_22_64 - step1[2] * cospi_10_64)
  butterfly_two_coeff_s32_s64_narrow(stepl[5], stepr[5], stepl[2], stepr[2],
                                     cospi_10_64, cospi_22_64, &left[5],
                                     &right[5], &left[11], &right[11]);
}

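// Full 16x16 forward DCT at high bit depth: the same two-pass structure as
// vpx_fdct16x16_neon() above, with each 8-column half widened to 32-bit
// left/right vector pairs before the column and row transforms.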
void vpx_highbd_fdct16x16_neon(const int16_t *input, tran_low_t *output,
                               int stride) {
  int16x8_t temp0[16];
  int32x4_t left1[16], left2[16], left3[16], left4[16], right1[16], right2[16],
      right3[16], right4[16];

  // Left half.
  load_cross(input, stride, temp0);
  highbd_scale_input(temp0, left1, right1);
  vpx_highbd_fdct8x16_body(left1, right1);

  // Right half.
  load_cross(input + 8, stride, temp0);
  highbd_scale_input(temp0, left2, right2);
  vpx_highbd_fdct8x16_body(left2, right2);

  // Transpose the top-left and top-right quarters into one contiguous
  // location so they can be processed as the top half.

  transpose_s32_8x8_2(left1, right1, left3, right3);
  transpose_s32_8x8_2(left2, right2, left3 + 8, right3 + 8);
  transpose_s32_8x8_2(left1 + 8, right1 + 8, left4, right4);
  transpose_s32_8x8_2(left2 + 8, right2 + 8, left4 + 8, right4 + 8);

  highbd_partial_round_shift(left3, right3);
  highbd_cross_input(left3, right3, left1, right1);
  vpx_highbd_fdct8x16_body(left1, right1);

  // Transpose the bottom-left and bottom-right quarters into one contiguous
  // location so they can be processed as the bottom half.

  highbd_partial_round_shift(left4, right4);
  highbd_cross_input(left4, right4, left2, right2);
  vpx_highbd_fdct8x16_body(left2, right2);

  transpose_s32_8x8_2(left1, right1, left3, right3);
  transpose_s32_8x8_2(left2, right2, left3 + 8, right3 + 8);
  transpose_s32_8x8_2(left1 + 8, right1 + 8, left4, right4);
  transpose_s32_8x8_2(left2 + 8, right2 + 8, left4 + 8, right4 + 8);
  store16_s32(output, left3);
  output += 4;
  store16_s32(output, right3);
  output += 4;

  store16_s32(output, left4);
  output += 4;
  store16_s32(output, right4);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

#endif  // !defined(__clang__) && !defined(__ANDROID__) && defined(__GNUC__) &&
        // __GNUC__ == 4 && __GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ < 4