/*
 *  Copyright 2013 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./psnr.h"  // NOLINT

#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>  // For __cpuid()
#endif

#ifdef __cplusplus
extern "C" {
#endif

typedef unsigned int uint32_t;  // NOLINT
#ifdef _MSC_VER
typedef unsigned __int64 uint64_t;
#else  // COMPILER_MSVC
#if defined(__LP64__) && !defined(__OpenBSD__) && !defined(__APPLE__)
typedef unsigned long uint64_t;  // NOLINT
#else   // defined(__LP64__) && !defined(__OpenBSD__) && !defined(__APPLE__)
typedef unsigned long long uint64_t;  // NOLINT
#endif  // __LP64__
#endif  // _MSC_VER

// libyuv provides this function when the library is linked with jpeg support.
#if !defined(HAVE_JPEG)

#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
    !defined(__aarch64__)
#define HAS_SUMSQUAREERROR_NEON
static uint32_t SumSquareError_NEON(const uint8_t* src_a,
                                    const uint8_t* src_b,
                                    int count) {
  volatile uint32_t sse;
  asm volatile(
      "vmov.u8    q7, #0                         \n"
      "vmov.u8    q9, #0                         \n"
      "vmov.u8    q8, #0                         \n"
      "vmov.u8    q10, #0                        \n"

      "1:                                        \n"
      "vld1.u8    {q0}, [%0]!                    \n"
      "vld1.u8    {q1}, [%1]!                    \n"
      "vsubl.u8   q2, d0, d2                     \n"
      "vsubl.u8   q3, d1, d3                     \n"
      "vmlal.s16  q7, d4, d4                     \n"
      "vmlal.s16  q8, d6, d6                     \n"
      "vmlal.s16  q8, d5, d5                     \n"
      "vmlal.s16  q10, d7, d7                    \n"
      "subs       %2, %2, #16                    \n"
      "bhi        1b                             \n"

      "vadd.u32   q7, q7, q8                     \n"
      "vadd.u32   q9, q9, q10                    \n"
      "vadd.u32   q10, q7, q9                    \n"
      "vpaddl.u32 q1, q10                        \n"
      "vadd.u64   d0, d2, d3                     \n"
      "vmov.32    %3, d0[0]                      \n"
      : "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(sse)
      :
      : "memory", "cc", "q0", "q1", "q2", "q3", "q7", "q8", "q9", "q10");
  return sse;
}
#elif !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
#define HAS_SUMSQUAREERROR_NEON
static uint32_t SumSquareError_NEON(const uint8_t* src_a,
                                    const uint8_t* src_b,
                                    int count) {
  volatile uint32_t sse;
  asm volatile(
      "eor        v16.16b, v16.16b, v16.16b      \n"
      "eor        v18.16b, v18.16b, v18.16b      \n"
      "eor        v17.16b, v17.16b, v17.16b      \n"
      "eor        v19.16b, v19.16b, v19.16b      \n"

      "1:                                        \n"
      "ld1        {v0.16b}, [%0], #16            \n"
      "ld1        {v1.16b}, [%1], #16            \n"
      "subs       %w2, %w2, #16                  \n"
      "usubl      v2.8h, v0.8b, v1.8b            \n"
      "usubl2     v3.8h, v0.16b, v1.16b          \n"
      "smlal      v16.4s, v2.4h, v2.4h           \n"
      "smlal      v17.4s, v3.4h, v3.4h           \n"
      "smlal2     v18.4s, v2.8h, v2.8h           \n"
      "smlal2     v19.4s, v3.8h, v3.8h           \n"
      "b.gt       1b                             \n"

      "add        v16.4s, v16.4s, v17.4s         \n"
      "add        v18.4s, v18.4s, v19.4s         \n"
      "add        v19.4s, v16.4s, v18.4s         \n"
      "addv       s0, v19.4s                     \n"
      "fmov       %w3, s0                        \n"
      : "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(sse)
      :
      : "cc", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19");
  return sse;
}
#elif !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER)
#define HAS_SUMSQUAREERROR_SSE2
__declspec(naked) static uint32_t SumSquareError_SSE2(const uint8_t* /*src_a*/,
                                                      const uint8_t* /*src_b*/,
                                                      int /*count*/) {
  __asm {
    mov        eax, [esp + 4]  // src_a
    mov        edx, [esp + 8]  // src_b
    mov        ecx, [esp + 12]  // count
    pxor       xmm0, xmm0
    pxor       xmm5, xmm5
    sub        edx, eax

  wloop:
    movdqu     xmm1, [eax]
    movdqu     xmm2, [eax + edx]
    lea        eax,  [eax + 16]
    movdqu     xmm3, xmm1
    psubusb    xmm1, xmm2
    psubusb    xmm2, xmm3
    por        xmm1, xmm2
    movdqu     xmm2, xmm1
    punpcklbw  xmm1, xmm5
    punpckhbw  xmm2, xmm5
    pmaddwd    xmm1, xmm1
    pmaddwd    xmm2, xmm2
    paddd      xmm0, xmm1
    paddd      xmm0, xmm2
    sub        ecx, 16
    ja         wloop

    pshufd     xmm1, xmm0, 0EEh
    paddd      xmm0, xmm1
    pshufd     xmm1, xmm0, 01h
    paddd      xmm0, xmm1
    movd       eax, xmm0
    ret
  }
}
#elif !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
#define HAS_SUMSQUAREERROR_SSE2
static uint32_t SumSquareError_SSE2(const uint8_t* src_a,
                                    const uint8_t* src_b,
                                    int count) {
  uint32_t sse;
  asm volatile(  // NOLINT
      "pxor      %%xmm0,%%xmm0                   \n"
      "pxor      %%xmm5,%%xmm5                   \n"
      "sub       %0,%1                           \n"

      "1:                                        \n"
      "movdqu    (%0),%%xmm1                     \n"
      "movdqu    (%0,%1,1),%%xmm2                \n"
      "lea       0x10(%0),%0                     \n"
      "movdqu    %%xmm1,%%xmm3                   \n"
      "psubusb   %%xmm2,%%xmm1                   \n"
      "psubusb   %%xmm3,%%xmm2                   \n"
      "por       %%xmm2,%%xmm1                   \n"
      "movdqu    %%xmm1,%%xmm2                   \n"
      "punpcklbw %%xmm5,%%xmm1                   \n"
      "punpckhbw %%xmm5,%%xmm2                   \n"
      "pmaddwd   %%xmm1,%%xmm1                   \n"
      "pmaddwd   %%xmm2,%%xmm2                   \n"
      "paddd     %%xmm1,%%xmm0                   \n"
      "paddd     %%xmm2,%%xmm0                   \n"
      "sub       $0x10,%2                        \n"
      "ja        1b                              \n"

      "pshufd    $0xee,%%xmm0,%%xmm1             \n"
      "paddd     %%xmm1,%%xmm0                   \n"
      "pshufd    $0x1,%%xmm0,%%xmm1              \n"
      "paddd     %%xmm1,%%xmm0                   \n"
      "movd      %%xmm0,%3                       \n"

      : "+r"(src_a),  // %0
        "+r"(src_b),  // %1
        "+r"(count),  // %2
        "=g"(sse)     // %3
      :
      : "memory", "cc"
#if defined(__SSE2__)
        ,
        "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
#endif
  );  // NOLINT
  return sse;
}
#endif  // LIBYUV_DISABLE_X86 etc

#if defined(HAS_SUMSQUAREERROR_SSE2)
#if (defined(__pic__) || defined(__APPLE__)) && defined(__i386__)
static __inline void __cpuid(int cpu_info[4], int info_type) {
  asm volatile(  // NOLINT
      "mov %%ebx, %%edi                          \n"
      "cpuid                                     \n"
      "xchg %%edi, %%ebx                         \n"
      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
        "=d"(cpu_info[3])
      : "a"(info_type));
}
// For gcc/clang but not clangcl.
#elif !defined(_MSC_VER) && (defined(__i386__) || defined(__x86_64__))
static __inline void __cpuid(int cpu_info[4], int info_type) {
  asm volatile(  // NOLINT
      "cpuid                                     \n"
      : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
        "=d"(cpu_info[3])
      : "a"(info_type));
}
#endif

static int CpuHasSSE2() {
#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86)
  int cpu_info[4];
  __cpuid(cpu_info, 1);
  // CPUID leaf 1 reports SSE2 support in bit 26 of EDX.
  if (cpu_info[3] & 0x04000000) {
    return 1;
  }
#endif
  return 0;
}
#endif  // HAS_SUMSQUAREERROR_SSE2

static uint32_t SumSquareError_C(const uint8_t* src_a,
                                 const uint8_t* src_b,
                                 int count) {
  uint32_t sse = 0u;
  for (int x = 0; x < count; ++x) {
    int diff = src_a[x] - src_b[x];
    sse += static_cast<uint32_t>(diff * diff);
  }
  return sse;
}

double ComputeSumSquareError(const uint8_t* src_a,
                             const uint8_t* src_b,
                             int count) {
  // Pick the fastest available implementation; fall back to plain C.
  uint32_t (*SumSquareError)(const uint8_t* src_a, const uint8_t* src_b,
                             int count) = SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_NEON)
  SumSquareError = SumSquareError_NEON;
#endif
#if defined(HAS_SUMSQUAREERROR_SSE2)
  if (CpuHasSSE2()) {
    SumSquareError = SumSquareError_SSE2;
  }
#endif
  // Accumulate in 32768-byte blocks so each 32 bit per-block sum cannot
  // overflow (32768 * 255^2 fits in 32 bits), then add the partial results
  // into a 64 bit total.
  const int kBlockSize = 1 << 15;
  uint64_t sse = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : sse)
#endif
  for (int i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
    sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
  }
  src_a += count & ~(kBlockSize - 1);
  src_b += count & ~(kBlockSize - 1);
  // Handle the tail: multiples of 16 with the optimized path, the final
  // 0..15 bytes with the C path.
  int remainder = count & (kBlockSize - 1) & ~15;
  if (remainder) {
    sse += SumSquareError(src_a, src_b, remainder);
    src_a += remainder;
    src_b += remainder;
  }
  remainder = count & 15;
  if (remainder) {
    sse += SumSquareError_C(src_a, src_b, remainder);
  }
  return static_cast<double>(sse);
}
#endif  // !defined(HAVE_JPEG)
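
// Usage sketch (illustrative only, not compiled): summing the squared error
// over two equally sized 8-bit planes. plane_a, plane_b, width and height are
// hypothetical caller-side names; the result feeds ComputePSNR() below.
//   const int count = width * height;
//   const double sse = ComputeSumSquareError(plane_a, plane_b, count);
//   const double psnr = ComputePSNR(sse, static_cast<double>(count));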

// PSNR formula: psnr = 10 * log10 (Peak Signal^2 * size / sse)
// Returns 128.0 (kMaxPSNR) if sse is 0 (perfect match).
double ComputePSNR(double sse, double size) {
  const double kMINSSE = 255.0 * 255.0 * size / pow(10.0, kMaxPSNR / 10.0);
  if (sse <= kMINSSE) {
    sse = kMINSSE;  // Produces max PSNR of 128
  }
  return 10.0 * log10(255.0 * 255.0 * size / sse);
}
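
// Worked example (illustrative numbers, not from the library): for a
// 1920x1080 plane, size = 1920 * 1080 = 2073600. With sse = 1000000,
//   ComputePSNR(1000000.0, 2073600.0)
//       == 10 * log10(255 * 255 * 2073600 / 1000000), about 51.3 dB.
// An sse at or below kMINSSE is clamped so the result never exceeds
// kMaxPSNR (128.0), e.g. for identical planes where sse == 0.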

#ifdef __cplusplus
}  // extern "C"
#endif