/***********************************************************************
Copyright (C) 2014 Vidyo
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <arm_neon.h>
#include "main.h"
#include "stack_alloc.h"
#include "NSQ.h"
#include "celt/cpu_support.h"
#include "celt/arm/armcpu.h"

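/* NEON intrinsics versions of two inner-loop filters from the SILK noise
   shaping quantizer (NSQ): the short-term prediction filter and the noise
   shaping feedback filter. */
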
opus_int32 silk_noise_shape_quantizer_short_prediction_neon(const opus_int32 *buf32, const opus_int32 *coef32, opus_int order)
{
    /* Load the 16 prediction coefficients. */
    int32x4_t coef0 = vld1q_s32(coef32);
    int32x4_t coef1 = vld1q_s32(coef32 + 4);
    int32x4_t coef2 = vld1q_s32(coef32 + 8);
    int32x4_t coef3 = vld1q_s32(coef32 + 12);

    /* Load the 16 most recent state values, buf32[-15] ... buf32[0]. */
    int32x4_t a0 = vld1q_s32(buf32 - 15);
    int32x4_t a1 = vld1q_s32(buf32 - 11);
    int32x4_t a2 = vld1q_s32(buf32 - 7);
    int32x4_t a3 = vld1q_s32(buf32 - 3);

    /* vqdmulhq_s32() keeps the high 32 bits of the doubled 64-bit product,
       i.e. each lane is (2*coef*buf) >> 32, with saturation. */
    int32x4_t b0 = vqdmulhq_s32(coef0, a0);
    int32x4_t b1 = vqdmulhq_s32(coef1, a1);
    int32x4_t b2 = vqdmulhq_s32(coef2, a2);
    int32x4_t b3 = vqdmulhq_s32(coef3, a3);

    /* Horizontally reduce the 16 products to a single 64-bit sum. */
    int32x4_t c0 = vaddq_s32(b0, b1);
    int32x4_t c1 = vaddq_s32(b2, b3);

    int32x4_t d = vaddq_s32(c0, c1);

    int64x2_t e = vpaddlq_s32(d);

    int64x1_t f = vadd_s64(vget_low_s64(e), vget_high_s64(e));

    /* Take the low 32 bits of the 64-bit sum. */
    opus_int32 out = vget_lane_s32(vreinterpret_s32_s64(f), 0);

    /* Same bias-compensation offset as the C version. */
    out += silk_RSHIFT( order, 1 );

    return out;
}
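
/* For reference, a scalar sketch of the sum computed above (an illustration
   derived from the intrinsics, ignoring the saturation vqdmulhq_s32()
   applies; it is not the actual C fallback, which works from 16-bit
   coefficients):

       opus_int i;
       opus_int32 out = silk_RSHIFT( order, 1 );
       for( i = 0; i < 16; i++ ) {
           out += (opus_int32)( ( (opus_int64)2 * coef32[ i ] * buf32[ i - 15 ] ) >> 32 );
       }
       return out;
*/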


opus_int32 silk_NSQ_noise_shape_feedback_loop_neon(const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef, opus_int order)
{
    opus_int32 out;
    if (order == 8)
    {
        int32x4_t a00 = vdupq_n_s32(data0[0]);
        int32x4_t a01 = vld1q_s32(data1); /* data1[0] ... [3] */

        int32x4_t a0 = vextq_s32 (a00, a01, 3); /* data0[0] data1[0] ...[2] */
        int32x4_t a1 = vld1q_s32(data1 + 3); /* data1[3] ... [6] */

        /*TODO: Convert these once in advance instead of once per sample, like
          silk_noise_shape_quantizer_short_prediction_neon() does.*/
        int16x8_t coef16 = vld1q_s16(coef);
        int32x4_t coef0 = vmovl_s16(vget_low_s16(coef16));
        int32x4_t coef1 = vmovl_s16(vget_high_s16(coef16));

        /*This is not bit-exact with the C version, since we do not drop the
          lower 16 bits of each multiply, but wait until the end to truncate
          precision. This is an encoder-specific calculation (and unlike
          silk_noise_shape_quantizer_short_prediction_neon(), is not meant to
          simulate what the decoder will do). We could still use vqdmulhq_s32()
          like silk_noise_shape_quantizer_short_prediction_neon() and save
          half the multiplies, but the speed difference is not large, since we
          would then need two extra adds.*/
        int64x2_t b0 = vmull_s32(vget_low_s32(a0), vget_low_s32(coef0));
        int64x2_t b1 = vmlal_s32(b0, vget_high_s32(a0), vget_high_s32(coef0));
        int64x2_t b2 = vmlal_s32(b1, vget_low_s32(a1), vget_low_s32(coef1));
        int64x2_t b3 = vmlal_s32(b2, vget_high_s32(a1), vget_high_s32(coef1));

        /* Sum the two 64-bit accumulators, then apply a rounding shift right
           by 15 to drop the extra precision kept above. */
        int64x1_t c = vadd_s64(vget_low_s64(b3), vget_high_s64(b3));
        int64x1_t cS = vrshr_n_s64(c, 15);
        int32x2_t d = vreinterpret_s32_s64(cS);

        out = vget_lane_s32(d, 0);
        /* Update the delay line: shift data1[] by one sample and insert
           data0[0] at the front. */
        vst1q_s32(data1, a0);
        vst1q_s32(data1 + 4, a1);
        return out;
    }
    return silk_NSQ_noise_shape_feedback_loop_c(data0, data1, coef, order);
}
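
/* For reference, a scalar sketch of the order-8 path above (an illustration
   derived from the intrinsics, not the bit-exact C fallback):

       opus_int i;
       opus_int64 sum = 0;
       opus_int32 in = data0[ 0 ];
       for( i = 0; i < 8; i++ ) {
           opus_int32 next = data1[ i ];
           sum += (opus_int64)in * coef[ i ];
           data1[ i ] = in;
           in = next;
       }
       return (opus_int32)( ( sum + ( (opus_int64)1 << 14 ) ) >> 15 );
*/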