/* xref: /aosp_15_r20/external/libvpx/vpx_dsp/ppc/quantize_vsx.c (revision fb1b10ab9aebc7c7068eedab379b749d7e3900be) */
/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"
16 // Negate 16-bit integers in a when the corresponding signed 16-bit
17 // integer in b is negative.
vec_sign(int16x8_t a,int16x8_t b)18 static INLINE int16x8_t vec_sign(int16x8_t a, int16x8_t b) {
19   const int16x8_t mask = vec_sra(b, vec_shift_sign_s16);
20   return vec_xor(vec_add(a, mask), mask);
21 }
22 
23 // Sets the value of a 32-bit integers to 1 when the corresponding value in a is
24 // negative.
vec_is_neg(int32x4_t a)25 static INLINE int32x4_t vec_is_neg(int32x4_t a) {
26   return vec_sr(a, vec_shift_sign_s32);
27 }
28 
29 // Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit
30 // integers, and return the high 16 bits of the intermediate integers.
31 // (a * b) >> 16
vec_mulhi(int16x8_t a,int16x8_t b)32 static INLINE int16x8_t vec_mulhi(int16x8_t a, int16x8_t b) {
33   // madds does ((A * B) >>15) + C, we need >> 16, so we perform an extra right
34   // shift.
35   return vec_sra(vec_madds(a, b, vec_zeros_s16), vec_ones_u16);
36 }
37 
38 // Quantization function used for 4x4, 8x8 and 16x16 blocks.
quantize_coeff(int16x8_t coeff,int16x8_t coeff_abs,int16x8_t round,int16x8_t quant,int16x8_t quant_shift,bool16x8_t mask)39 static INLINE int16x8_t quantize_coeff(int16x8_t coeff, int16x8_t coeff_abs,
40                                        int16x8_t round, int16x8_t quant,
41                                        int16x8_t quant_shift, bool16x8_t mask) {
42   const int16x8_t rounded = vec_vaddshs(coeff_abs, round);
43   int16x8_t qcoeff = vec_mulhi(rounded, quant);
44   qcoeff = vec_add(qcoeff, rounded);
45   qcoeff = vec_mulhi(qcoeff, quant_shift);
46   qcoeff = vec_sign(qcoeff, coeff);
47   return vec_and(qcoeff, mask);
48 }
49 
50 // Quantization function used for 32x32 blocks.
quantize_coeff_32(int16x8_t coeff,int16x8_t coeff_abs,int16x8_t round,int16x8_t quant,int16x8_t quant_shift,bool16x8_t mask)51 static INLINE int16x8_t quantize_coeff_32(int16x8_t coeff, int16x8_t coeff_abs,
52                                           int16x8_t round, int16x8_t quant,
53                                           int16x8_t quant_shift,
54                                           bool16x8_t mask) {
55   const int16x8_t rounded = vec_vaddshs(coeff_abs, round);
56   int16x8_t qcoeff = vec_mulhi(rounded, quant);
57   qcoeff = vec_add(qcoeff, rounded);
58   // 32x32 blocks require an extra multiplication by 2, this compensates for the
59   // extra right shift added in vec_mulhi, as such vec_madds can be used
60   // directly instead of vec_mulhi (((a * b) >> 15) >> 1) << 1 == (a * b >> 15)
61   qcoeff = vec_madds(qcoeff, quant_shift, vec_zeros_s16);
62   qcoeff = vec_sign(qcoeff, coeff);
63   return vec_and(qcoeff, mask);
64 }
65 
66 // DeQuantization function used for 32x32 blocks. Quantized coeff of 32x32
67 // blocks are twice as big as for other block sizes. As such, using
68 // vec_mladd results in overflow.
dequantize_coeff_32(int16x8_t qcoeff,int16x8_t dequant)69 static INLINE int16x8_t dequantize_coeff_32(int16x8_t qcoeff,
70                                             int16x8_t dequant) {
71   int32x4_t dqcoeffe = vec_mule(qcoeff, dequant);
72   int32x4_t dqcoeffo = vec_mulo(qcoeff, dequant);
73   // Add 1 if negative to round towards zero because the C uses division.
74   dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe));
75   dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo));
76   dqcoeffe = vec_sra(dqcoeffe, vec_ones_u32);
77   dqcoeffo = vec_sra(dqcoeffo, vec_ones_u32);
78   return (int16x8_t)vec_perm(dqcoeffe, dqcoeffo, vec_perm_odd_even_pack);
79 }
80 
nonzero_scanindex(int16x8_t qcoeff,const int16_t * iscan_ptr,int index)81 static INLINE int16x8_t nonzero_scanindex(int16x8_t qcoeff,
82                                           const int16_t *iscan_ptr, int index) {
83   int16x8_t scan = vec_vsx_ld(index, iscan_ptr);
84   bool16x8_t zero_coeff = vec_cmpeq(qcoeff, vec_zeros_s16);
85   return vec_andc(scan, zero_coeff);
86 }
87 
88 // Compare packed 16-bit integers across a, and return the maximum value in
89 // every element. Returns a vector containing the biggest value across vector a.
vec_max_across(int16x8_t a)90 static INLINE int16x8_t vec_max_across(int16x8_t a) {
91   a = vec_max(a, vec_perm(a, a, vec_perm64));
92   a = vec_max(a, vec_perm(a, a, vec_perm32));
93   return vec_max(a, vec_perm(a, a, vec_perm16));
94 }
95 
// Quantize n_coeffs coefficients for 4x4, 8x8 and 16x16 blocks: write the
// quantized values to qcoeff_ptr, the dequantized values to dqcoeff_ptr and
// the end-of-block position (from iscan) to eob_ptr.
void vpx_quantize_b_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                        const int16_t *zbin_ptr, const int16_t *round_ptr,
                        const int16_t *quant_ptr,
                        const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
                        tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
                        uint16_t *eob_ptr, const int16_t *scan_ptr,
                        const int16_t *iscan_ptr) {
  int16x8_t qc0, qc1, dqc0, dqc1, eob;
  bool16x8_t nz_mask0, nz_mask1;
  // Coefficients consumed so far and byte offsets of the next 3 vectors.
  int index = 16;
  int off0 = 32;
  int off1 = 48;
  int off2 = 64;

  // Lane 0 of each constant vector holds the DC value, lane 1 the AC value.
  int16x8_t zbin = vec_vsx_ld(0, zbin_ptr);
  int16x8_t round = vec_vsx_ld(0, round_ptr);
  int16x8_t quant = vec_vsx_ld(0, quant_ptr);
  int16x8_t dequant = vec_vsx_ld(0, dequant_ptr);
  int16x8_t quant_shift = vec_vsx_ld(0, quant_shift_ptr);

  // First 16 coefficients: DC + 15 AC.
  int16x8_t c0 = vec_vsx_ld(0, coeff_ptr);
  int16x8_t c1 = vec_vsx_ld(16, coeff_ptr);
  int16x8_t abs0 = vec_abs(c0);
  int16x8_t abs1 = vec_abs(c1);

  (void)scan_ptr;

  nz_mask0 = vec_cmpge(abs0, zbin);
  zbin = vec_splat(zbin, 1);  // broadcast the AC value over all lanes
  nz_mask1 = vec_cmpge(abs1, zbin);

  qc0 = quantize_coeff(c0, abs0, round, quant, quant_shift, nz_mask0);
  vec_vsx_st(qc0, 0, qcoeff_ptr);
  // Only the AC constants apply from here on.
  round = vec_splat(round, 1);
  quant = vec_splat(quant, 1);
  quant_shift = vec_splat(quant_shift, 1);
  qc1 = quantize_coeff(c1, abs1, round, quant, quant_shift, nz_mask1);
  vec_vsx_st(qc1, 16, qcoeff_ptr);

  dqc0 = vec_mladd(qc0, dequant, vec_zeros_s16);
  vec_vsx_st(dqc0, 0, dqcoeff_ptr);
  dequant = vec_splat(dequant, 1);
  dqc1 = vec_mladd(qc1, dequant, vec_zeros_s16);
  vec_vsx_st(dqc1, 16, dqcoeff_ptr);

  eob = vec_max(nonzero_scanindex(qc0, iscan_ptr, 0),
                nonzero_scanindex(qc1, iscan_ptr, 16));

  // Remaining coefficients: 24 (three vectors of 8) per iteration.
  while (index < n_coeffs) {
    int16x8_t c2, abs2, qc2, dqc2, eob2;
    bool16x8_t nz_mask2;

    c0 = vec_vsx_ld(off0, coeff_ptr);
    c1 = vec_vsx_ld(off1, coeff_ptr);
    c2 = vec_vsx_ld(off2, coeff_ptr);
    abs0 = vec_abs(c0);
    abs1 = vec_abs(c1);
    abs2 = vec_abs(c2);
    nz_mask0 = vec_cmpge(abs0, zbin);
    nz_mask1 = vec_cmpge(abs1, zbin);
    nz_mask2 = vec_cmpge(abs2, zbin);

    qc0 = quantize_coeff(c0, abs0, round, quant, quant_shift, nz_mask0);
    qc1 = quantize_coeff(c1, abs1, round, quant, quant_shift, nz_mask1);
    qc2 = quantize_coeff(c2, abs2, round, quant, quant_shift, nz_mask2);
    vec_vsx_st(qc0, off0, qcoeff_ptr);
    vec_vsx_st(qc1, off1, qcoeff_ptr);
    vec_vsx_st(qc2, off2, qcoeff_ptr);

    dqc0 = vec_mladd(qc0, dequant, vec_zeros_s16);
    dqc1 = vec_mladd(qc1, dequant, vec_zeros_s16);
    dqc2 = vec_mladd(qc2, dequant, vec_zeros_s16);
    vec_vsx_st(dqc0, off0, dqcoeff_ptr);
    vec_vsx_st(dqc1, off1, dqcoeff_ptr);
    vec_vsx_st(dqc2, off2, dqcoeff_ptr);

    eob = vec_max(eob, nonzero_scanindex(qc0, iscan_ptr, off0));
    eob2 = vec_max(nonzero_scanindex(qc1, iscan_ptr, off1),
                   nonzero_scanindex(qc2, iscan_ptr, off2));
    eob = vec_max(eob, eob2);

    index += 24;
    off0 += 48;  // 24 int16_t == 48 bytes
    off1 += 48;
    off2 += 48;
  }

  eob = vec_max_across(eob);
  *eob_ptr = eob[0];
}
194 
// Quantize a full 32x32 block (n_coeffs is implied): write quantized and
// dequantized values and the end-of-block position.  zbin and round are
// halved for this block size, and quantized values are twice as large.
void vpx_quantize_b_32x32_vsx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              const int16_t *zbin_ptr, const int16_t *round_ptr,
                              const int16_t *quant_ptr,
                              const int16_t *quant_shift_ptr,
                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
                              const int16_t *scan_ptr,
                              const int16_t *iscan_ptr) {
  int16x8_t qc0, qc1, eob;
  bool16x8_t nz_mask0, nz_mask1;
  // Stage 1 handles the first 16 coeffs (DC + 15 AC); stage 2 then runs
  // 42 iterations of 24 coeffs each: (32 * 32 - 16) / 24 == 42.
  int itr;
  // Byte offsets of the three vectors per iteration (16 coeffs == 32 bytes).
  int off0 = 32;
  int off1 = 48;
  int off2 = 64;

  // Lane 0 of each constant vector holds the DC value, lane 1 the AC value.
  int16x8_t zbin = vec_vsx_ld(0, zbin_ptr);
  int16x8_t round = vec_vsx_ld(0, round_ptr);
  int16x8_t quant = vec_vsx_ld(0, quant_ptr);
  int16x8_t dequant = vec_vsx_ld(0, dequant_ptr);
  int16x8_t quant_shift = vec_vsx_ld(0, quant_shift_ptr);

  int16x8_t c0 = vec_vsx_ld(0, coeff_ptr);
  int16x8_t c1 = vec_vsx_ld(16, coeff_ptr);
  int16x8_t abs0 = vec_abs(c0);
  int16x8_t abs1 = vec_abs(c1);

  (void)scan_ptr;
  (void)n_coeffs;

  // 32x32 quantization requires zbin and round divided by 2 (rounded).
  zbin = vec_sra(vec_add(zbin, vec_ones_s16), vec_ones_u16);
  round = vec_sra(vec_add(round, vec_ones_s16), vec_ones_u16);

  nz_mask0 = vec_cmpge(abs0, zbin);
  zbin = vec_splat(zbin, 1);  // broadcast the AC value over all lanes
  nz_mask1 = vec_cmpge(abs1, zbin);

  qc0 = quantize_coeff_32(c0, abs0, round, quant, quant_shift, nz_mask0);
  // Only the AC constants apply from here on.
  round = vec_splat(round, 1);
  quant = vec_splat(quant, 1);
  quant_shift = vec_splat(quant_shift, 1);
  qc1 = quantize_coeff_32(c1, abs1, round, quant, quant_shift, nz_mask1);

  vec_vsx_st(qc0, 0, qcoeff_ptr);
  vec_vsx_st(qc1, 16, qcoeff_ptr);

  vec_vsx_st(dequantize_coeff_32(qc0, dequant), 0, dqcoeff_ptr);
  dequant = vec_splat(dequant, 1);  // AC dequant from here on
  vec_vsx_st(dequantize_coeff_32(qc1, dequant), 16, dqcoeff_ptr);

  eob = vec_max(nonzero_scanindex(qc0, iscan_ptr, 0),
                nonzero_scanindex(qc1, iscan_ptr, 16));

  for (itr = 0; itr < 42; itr++) {
    int16x8_t c2, abs2, qc2, eob2;
    bool16x8_t nz_mask2;

    c0 = vec_vsx_ld(off0, coeff_ptr);
    c1 = vec_vsx_ld(off1, coeff_ptr);
    c2 = vec_vsx_ld(off2, coeff_ptr);

    abs0 = vec_abs(c0);
    abs1 = vec_abs(c1);
    abs2 = vec_abs(c2);

    nz_mask0 = vec_cmpge(abs0, zbin);
    nz_mask1 = vec_cmpge(abs1, zbin);
    nz_mask2 = vec_cmpge(abs2, zbin);

    qc0 = quantize_coeff_32(c0, abs0, round, quant, quant_shift, nz_mask0);
    qc1 = quantize_coeff_32(c1, abs1, round, quant, quant_shift, nz_mask1);
    qc2 = quantize_coeff_32(c2, abs2, round, quant, quant_shift, nz_mask2);

    vec_vsx_st(qc0, off0, qcoeff_ptr);
    vec_vsx_st(qc1, off1, qcoeff_ptr);
    vec_vsx_st(qc2, off2, qcoeff_ptr);

    vec_vsx_st(dequantize_coeff_32(qc0, dequant), off0, dqcoeff_ptr);
    vec_vsx_st(dequantize_coeff_32(qc1, dequant), off1, dqcoeff_ptr);
    vec_vsx_st(dequantize_coeff_32(qc2, dequant), off2, dqcoeff_ptr);

    eob = vec_max(eob, nonzero_scanindex(qc0, iscan_ptr, off0));
    eob2 = vec_max(nonzero_scanindex(qc1, iscan_ptr, off1),
                   nonzero_scanindex(qc2, iscan_ptr, off2));
    eob = vec_max(eob, eob2);

    off0 += 48;  // 24 int16_t == 48 bytes
    off1 += 48;
    off2 += 48;
  }

  eob = vec_max_across(eob);
  *eob_ptr = eob[0];
}
302