/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/fdct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/fdct8x8_neon.h"

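// Forward 8x8 2-D DCT. Each input row is pre-scaled by 4 (<< 2), run through
// two 1-D transform passes, and the result is divided by 2 to match the C
// reference output range.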
void vpx_fdct8x8_neon(const int16_t *input, tran_low_t *final_output,
                      int stride) {
  // stage 1
  int16x8_t in[8];
  in[0] = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2);
  in[1] = vshlq_n_s16(vld1q_s16(&input[1 * stride]), 2);
  in[2] = vshlq_n_s16(vld1q_s16(&input[2 * stride]), 2);
  in[3] = vshlq_n_s16(vld1q_s16(&input[3 * stride]), 2);
  in[4] = vshlq_n_s16(vld1q_s16(&input[4 * stride]), 2);
  in[5] = vshlq_n_s16(vld1q_s16(&input[5 * stride]), 2);
  in[6] = vshlq_n_s16(vld1q_s16(&input[6 * stride]), 2);
  in[7] = vshlq_n_s16(vld1q_s16(&input[7 * stride]), 2);

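  // Two 1-D transform passes; together they apply the full 2-D transform.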
  vpx_fdct8x8_pass1_neon(in);
  vpx_fdct8x8_pass2_neon(in);
  {
    // from vpx_dct_sse2.c
    // Post-condition (division by two)
    // divide a 16-bit signed number by two using shifts:
    // n / 2 = (n - (n >> 15)) >> 1
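    // e.g. n = -3: (-3 >> 15) = -1 and (-3 - (-1)) >> 1 = -1, truncating
    // toward zero, where a plain arithmetic shift would yield -2.
    // vhsubq_s16 computes the fused halving subtract (a - b) >> 1 directly.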
    const int16x8_t sign_in0 = vshrq_n_s16(in[0], 15);
    const int16x8_t sign_in1 = vshrq_n_s16(in[1], 15);
    const int16x8_t sign_in2 = vshrq_n_s16(in[2], 15);
    const int16x8_t sign_in3 = vshrq_n_s16(in[3], 15);
    const int16x8_t sign_in4 = vshrq_n_s16(in[4], 15);
    const int16x8_t sign_in5 = vshrq_n_s16(in[5], 15);
    const int16x8_t sign_in6 = vshrq_n_s16(in[6], 15);
    const int16x8_t sign_in7 = vshrq_n_s16(in[7], 15);
    in[0] = vhsubq_s16(in[0], sign_in0);
    in[1] = vhsubq_s16(in[1], sign_in1);
    in[2] = vhsubq_s16(in[2], sign_in2);
    in[3] = vhsubq_s16(in[3], sign_in3);
    in[4] = vhsubq_s16(in[4], sign_in4);
    in[5] = vhsubq_s16(in[5], sign_in5);
    in[6] = vhsubq_s16(in[6], sign_in6);
    in[7] = vhsubq_s16(in[7], sign_in7);
    // store results
    store_s16q_to_tran_low(final_output + 0 * 8, in[0]);
    store_s16q_to_tran_low(final_output + 1 * 8, in[1]);
    store_s16q_to_tran_low(final_output + 2 * 8, in[2]);
    store_s16q_to_tran_low(final_output + 3 * 8, in[3]);
    store_s16q_to_tran_low(final_output + 4 * 8, in[4]);
    store_s16q_to_tran_low(final_output + 5 * 8, in[5]);
    store_s16q_to_tran_low(final_output + 6 * 8, in[6]);
    store_s16q_to_tran_low(final_output + 7 * 8, in[7]);
  }
}

#if CONFIG_VP9_HIGHBITDEPTH

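// High bit-depth forward 8x8 2-D DCT: same structure as the function above,
// but intermediates are widened to 32 bits (each 8-wide row is held as
// left/right 4-lane halves) to avoid overflow with 10- and 12-bit input.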
void vpx_highbd_fdct8x8_neon(const int16_t *input, tran_low_t *final_output,
                             int stride) {
  // input[M * stride] * 4
  int32x4_t left[8], right[8];
  int16x8_t in[8];
  in[0] = vld1q_s16(input + 0 * stride);
  in[1] = vld1q_s16(input + 1 * stride);
  in[2] = vld1q_s16(input + 2 * stride);
  in[3] = vld1q_s16(input + 3 * stride);
  in[4] = vld1q_s16(input + 4 * stride);
  in[5] = vld1q_s16(input + 5 * stride);
  in[6] = vld1q_s16(input + 6 * stride);
  in[7] = vld1q_s16(input + 7 * stride);

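  // vshll_n_s16 widens each 16-bit lane to 32 bits and applies the << 2
  // (x4) pre-scale in a single instruction.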
  left[0] = vshll_n_s16(vget_low_s16(in[0]), 2);
  left[1] = vshll_n_s16(vget_low_s16(in[1]), 2);
  left[2] = vshll_n_s16(vget_low_s16(in[2]), 2);
  left[3] = vshll_n_s16(vget_low_s16(in[3]), 2);
  left[4] = vshll_n_s16(vget_low_s16(in[4]), 2);
  left[5] = vshll_n_s16(vget_low_s16(in[5]), 2);
  left[6] = vshll_n_s16(vget_low_s16(in[6]), 2);
  left[7] = vshll_n_s16(vget_low_s16(in[7]), 2);
  right[0] = vshll_n_s16(vget_high_s16(in[0]), 2);
  right[1] = vshll_n_s16(vget_high_s16(in[1]), 2);
  right[2] = vshll_n_s16(vget_high_s16(in[2]), 2);
  right[3] = vshll_n_s16(vget_high_s16(in[3]), 2);
  right[4] = vshll_n_s16(vget_high_s16(in[4]), 2);
  right[5] = vshll_n_s16(vget_high_s16(in[5]), 2);
  right[6] = vshll_n_s16(vget_high_s16(in[6]), 2);
  right[7] = vshll_n_s16(vget_high_s16(in[7]), 2);

  vpx_highbd_fdct8x8_pass1_neon(left, right);
  vpx_highbd_fdct8x8_pass2_neon(left, right);
  {
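    // Same divide-by-two post-condition as the 16-bit path above, applied
    // at 32-bit precision.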
    left[0] = add_round_shift_half_s32(left[0]);
    left[1] = add_round_shift_half_s32(left[1]);
    left[2] = add_round_shift_half_s32(left[2]);
    left[3] = add_round_shift_half_s32(left[3]);
    left[4] = add_round_shift_half_s32(left[4]);
    left[5] = add_round_shift_half_s32(left[5]);
    left[6] = add_round_shift_half_s32(left[6]);
    left[7] = add_round_shift_half_s32(left[7]);
    right[0] = add_round_shift_half_s32(right[0]);
    right[1] = add_round_shift_half_s32(right[1]);
    right[2] = add_round_shift_half_s32(right[2]);
    right[3] = add_round_shift_half_s32(right[3]);
    right[4] = add_round_shift_half_s32(right[4]);
    right[5] = add_round_shift_half_s32(right[5]);
    right[6] = add_round_shift_half_s32(right[6]);
    right[7] = add_round_shift_half_s32(right[7]);

    // store results: each output row is its left half (columns 0-3) followed
    // by its right half (columns 4-7)
    vst1q_s32(final_output, left[0]);
    vst1q_s32(final_output + 4, right[0]);
    vst1q_s32(final_output + 8, left[1]);
    vst1q_s32(final_output + 12, right[1]);
    vst1q_s32(final_output + 16, left[2]);
    vst1q_s32(final_output + 20, right[2]);
    vst1q_s32(final_output + 24, left[3]);
    vst1q_s32(final_output + 28, right[3]);
    vst1q_s32(final_output + 32, left[4]);
    vst1q_s32(final_output + 36, right[4]);
    vst1q_s32(final_output + 40, left[5]);
    vst1q_s32(final_output + 44, right[5]);
    vst1q_s32(final_output + 48, left[6]);
    vst1q_s32(final_output + 52, right[6]);
    vst1q_s32(final_output + 56, left[7]);
    vst1q_s32(final_output + 60, right[7]);
  }
}

#endif  // CONFIG_VP9_HIGHBITDEPTH