xref: /aosp_15_r20/external/mesa3d/src/util/u_cpu_detect.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /**************************************************************************
2  *
3  * Copyright 2008 Dennis Smit
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * on the rights to use, copy, modify, merge, publish, distribute, sub
10  * license, and/or sell copies of the Software, and to permit persons to whom
11  * the Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
20  * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23  * USE OR OTHER DEALINGS IN THE SOFTWARE.
24  *
25  **************************************************************************/
26 
27 /**
28  * @file
29  * CPU feature detection.
30  *
31  * @author Dennis Smit
32  * @author Based on the work of Eric Anholt <[email protected]>
33  */
34 
35 #include "util/detect.h"
36 #include "util/compiler.h"
37 
38 #include "util/u_debug.h"
39 #include "u_cpu_detect.h"
40 #include "u_math.h"
41 #include "os_file.h"
42 #include "c11/threads.h"
43 
44 #include <stdio.h>
45 #include <inttypes.h>
46 
47 #if DETECT_ARCH_PPC
48 #if DETECT_OS_APPLE
49 #include <sys/sysctl.h>
50 #else
51 #include <signal.h>
52 #include <setjmp.h>
53 #endif
54 #endif
55 
56 #if DETECT_OS_BSD
57 #include <sys/param.h>
58 #include <sys/sysctl.h>
59 #include <machine/cpu.h>
60 #endif
61 
62 #if DETECT_OS_FREEBSD
63 #if __has_include(<sys/auxv.h>)
64 #include <sys/auxv.h>
65 #define HAVE_ELF_AUX_INFO
66 #endif
67 #endif
68 
69 #if DETECT_OS_LINUX
70 #include <signal.h>
71 #include <fcntl.h>
72 #include <elf.h>
73 #endif
74 
75 #if DETECT_OS_POSIX
76 #include <unistd.h>
77 #endif
78 
79 #if defined(HAS_ANDROID_CPUFEATURES)
80 #include <cpu-features.h>
81 #endif
82 
83 #if DETECT_OS_WINDOWS
84 #include <windows.h>
85 #if DETECT_CC_MSVC
86 #include <intrin.h>
87 #endif
88 #endif
89 
90 #if defined(HAS_SCHED_H)
91 #include <sched.h>
92 #endif
93 
94 // prevent inadvertent infinite recursion
95 #define util_get_cpu_caps() util_get_cpu_caps_DO_NOT_USE()
96 
97 DEBUG_GET_ONCE_BOOL_OPTION(dump_cpu, "GALLIUM_DUMP_CPU", false)
98 
99 static
100 struct util_cpu_caps_t util_cpu_caps;
101 
102 /* Do not try to access _util_cpu_caps_state directly; call util_get_cpu_caps() instead */
103 struct _util_cpu_caps_state_t _util_cpu_caps_state = {
104    .once_flag = ONCE_FLAG_INIT,
105    .detect_done = 0,
106 };
107 
108 #if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
109 static int has_cpuid(void);
110 #endif
111 
112 
113 #if DETECT_ARCH_PPC && !DETECT_OS_APPLE && !DETECT_OS_BSD && !DETECT_OS_LINUX
114 static jmp_buf  __lv_powerpc_jmpbuf;
115 static volatile sig_atomic_t __lv_powerpc_canjump = 0;
116 
/* SIGILL handler used while probing AltiVec/VSX instructions.
 *
 * NOTE(review): longjmp() out of a signal handler is formally only
 * async-signal-safe under restrictive conditions; sigsetjmp()/siglongjmp()
 * would be the more portable choice — confirm before changing.
 */
static void
sigill_handler(int sig)
{
   /* SIGILL arrived outside a probe window: restore the default handler
    * and re-raise so the process terminates as it normally would.
    */
   if (!__lv_powerpc_canjump) {
      signal (sig, SIG_DFL);
      raise (sig);
   }

   /* A probed instruction trapped: close the probe window and jump back
    * to the setjmp() in check_os_altivec_support().
    */
   __lv_powerpc_canjump = 0;
   longjmp(__lv_powerpc_jmpbuf, 1);
}
128 #endif
129 
#if DETECT_ARCH_PPC
/**
 * Detect AltiVec (VMX) and VSX support on PowerPC and set
 * util_cpu_caps.has_altivec / util_cpu_caps.has_vsx accordingly.
 *
 * Detection strategy, in order of preference:
 *  - compile-time: __ALTIVEC__/__VSX__ mean the compiler already targets
 *    these features, so the CPU must support them;
 *  - Apple/NetBSD/OpenBSD: sysctl() query;
 *  - FreeBSD: elf_aux_info() or the hw.cpu_features sysctl;
 *  - Linux: AT_HWCAP entry of /proc/self/auxv;
 *  - anything else: execute a sample instruction under a SIGILL handler
 *    (brute force, borrowed from libmpeg2).
 */
static void
check_os_altivec_support(void)
{
#if defined(__ALTIVEC__)
   util_cpu_caps.has_altivec = 1;
#endif
#if defined(__VSX__)
   util_cpu_caps.has_vsx = 1;
#endif
#if defined(__ALTIVEC__) && defined(__VSX__)
/* Do nothing */
#elif DETECT_OS_APPLE || DETECT_OS_NETBSD || DETECT_OS_OPENBSD
#ifdef HW_VECTORUNIT
   int sels[2] = {CTL_HW, HW_VECTORUNIT};
#else
   int sels[2] = {CTL_MACHDEP, CPU_ALTIVEC};
#endif
   int has_vu = 0;
   size_t len = sizeof (has_vu);
   int err;

   err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

   if (err == 0) {
      if (has_vu != 0) {
         util_cpu_caps.has_altivec = 1;
      }
   }
#elif DETECT_OS_FREEBSD /* !DETECT_OS_APPLE && !DETECT_OS_NETBSD && !DETECT_OS_OPENBSD */
   unsigned long hwcap = 0;
#ifdef HAVE_ELF_AUX_INFO
   elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
#else
   size_t len = sizeof(hwcap);
   sysctlbyname("hw.cpu_features", &hwcap, &len, NULL, 0);
#endif
   if (hwcap & PPC_FEATURE_HAS_ALTIVEC)
      util_cpu_caps.has_altivec = 1;
   if (hwcap & PPC_FEATURE_HAS_VSX)
      util_cpu_caps.has_vsx = 1;
#elif DETECT_OS_LINUX /* !DETECT_OS_FREEBSD */
#if DETECT_ARCH_PPC_64
    Elf64_auxv_t aux;
#else
    Elf32_auxv_t aux;
#endif
    /* Walk the ELF auxiliary vector looking for the AT_HWCAP entry. */
    int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
    if (fd >= 0) {
       while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
          if (aux.a_type == AT_HWCAP) {
             char *env_vsx = getenv("GALLIVM_VSX");
             uint64_t hwcap = aux.a_un.a_val;
             /* Bits 28 and 7 match PPC_FEATURE_HAS_ALTIVEC and
              * PPC_FEATURE_HAS_VSX used in the FreeBSD branch above.
              */
             util_cpu_caps.has_altivec = (hwcap >> 28) & 1;
             /* GALLIVM_VSX=0 disables VSX even when the CPU has it. */
             if (!env_vsx || env_vsx[0] != '0') {
                util_cpu_caps.has_vsx  = (hwcap >>  7) & 1;
             }
             break;
          }
       }
       close(fd);
    }
#else /* !DETECT_OS_APPLE && !DETECT_OS_BSD && !DETECT_OS_LINUX */
   /* not on Apple/Darwin or Linux, do it the brute-force way */
   /* this is borrowed from the libmpeg2 library */
   signal(SIGILL, sigill_handler);
   if (setjmp(__lv_powerpc_jmpbuf)) {
      /* We get here via longjmp() from sigill_handler(): the probed
       * instruction trapped, so the feature is absent.
       */
      signal(SIGILL, SIG_DFL);
   } else {
      bool enable_altivec = true;    /* Default: enable  if available, and if not overridden */
      bool enable_vsx = true;
#if MESA_DEBUG
      /* Disabling Altivec code generation is not the same as disabling VSX code generation,
       * which can be done simply by passing -mattr=-vsx to the LLVM compiler; cf.
       * lp_build_create_jit_compiler_for_module().
       * If you want to disable Altivec code generation, the best place to do it is here.
       */
      char *env_control = getenv("GALLIVM_ALTIVEC");    /* 1=enable (default); 0=disable */
      if (env_control && env_control[0] == '0') {
         enable_altivec = false;
      }
#endif
      /* VSX instructions can be explicitly enabled/disabled via GALLIVM_VSX=1 or 0 */
      char *env_vsx = getenv("GALLIVM_VSX");
      if (env_vsx && env_vsx[0] == '0') {
         enable_vsx = false;
      }
      if (enable_altivec) {
         /* Open the probe window so sigill_handler() longjmps back here
          * instead of killing the process.
          */
         __lv_powerpc_canjump = 1;

         /* Probe AltiVec: write VRSAVE (SPR 256) and execute a vector AND. */
         __asm __volatile
            ("mtspr 256, %0\n\t"
             "vand %%v0, %%v0, %%v0"
             :
             : "r" (-1));

         util_cpu_caps.has_altivec = 1;

         if (enable_vsx) {
            /* Probe VSX with a vector-scalar AND. */
            __asm __volatile("xxland %vs0, %vs0, %vs0");
            util_cpu_caps.has_vsx = 1;
         }
         signal(SIGILL, SIG_DFL);
      } else {
         util_cpu_caps.has_altivec = 0;
      }
   }
#endif /* !DETECT_OS_APPLE && !DETECT_OS_LINUX */
}
#endif /* DETECT_ARCH_PPC */
240 
241 
242 #if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
/**
 * Return non-zero if the CPUID instruction is available.
 *
 * On 32-bit x86 this is detected by trying to toggle the ID bit
 * (bit 21, 0x200000) in EFLAGS: if the bit can be changed, CPUID exists.
 * On x86-64 CPUID is always available.
 */
static int has_cpuid(void)
{
#if DETECT_ARCH_X86
#if DETECT_OS_GCC
   int a, c;

   __asm __volatile
      ("pushf\n"
       "popl %0\n"              /* a = current EFLAGS */
       "movl %0, %1\n"          /* c = saved copy of EFLAGS */
       "xorl $0x200000, %0\n"   /* flip the ID bit */
       "push %0\n"
       "popf\n"                 /* attempt to write it back ... */
       "pushf\n"
       "popl %0\n"              /* ... and re-read EFLAGS */
       : "=a" (a), "=c" (c)
       :
       : "cc");

   /* The bit stuck => CPU honours the ID flag => CPUID is supported. */
   return a != c;
#else
   /* FIXME */
   return 1;
#endif
#elif DETECT_ARCH_X86_64
   return 1;
#else
   return 0;
#endif
}
273 
274 
275 /**
276  * @sa cpuid.h included in gcc-4.3 onwards.
277  * @sa http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
278  */
/* Execute CPUID for leaf 'ax' and store EAX/EBX/ECX/EDX in p[0..3].
 * Falls back to all-zero results when no implementation is available,
 * which reads as "no features" to the callers.
 */
static inline void
cpuid(uint32_t ax, uint32_t *p)
{
#if DETECT_CC_GCC && DETECT_ARCH_X86
   /* On 32-bit x86, %ebx may be reserved as the PIC base register, so it
    * cannot be named as an output; shuffle it through %esi instead.
    */
   __asm __volatile (
     "xchgl %%ebx, %1\n\t"
     "cpuid\n\t"
     "xchgl %%ebx, %1"
     : "=a" (p[0]),
       "=S" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax)
   );
#elif DETECT_CC_GCC && DETECT_ARCH_X86_64
   __asm __volatile (
     "cpuid\n\t"
     : "=a" (p[0]),
       "=b" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax)
   );
#elif DETECT_CC_MSVC
   __cpuid(p, ax);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}
311 
312 /**
313  * @sa cpuid.h included in gcc-4.4 onwards.
314  * @sa http://msdn.microsoft.com/en-us/library/hskdteyh%28v=vs.90%29.aspx
315  */
/* Execute CPUID for leaf 'ax' with sub-leaf 'cx' and store
 * EAX/EBX/ECX/EDX in p[0..3].  Zeros the result when no implementation
 * is available.
 */
static inline void
cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p)
{
#if DETECT_CC_GCC && DETECT_ARCH_X86
   /* %ebx may be the PIC base register on 32-bit x86; swap via %esi. */
   __asm __volatile (
     "xchgl %%ebx, %1\n\t"
     "cpuid\n\t"
     "xchgl %%ebx, %1"
     : "=a" (p[0]),
       "=S" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax), "2" (cx)
   );
#elif DETECT_CC_GCC && DETECT_ARCH_X86_64
   __asm __volatile (
     "cpuid\n\t"
     : "=a" (p[0]),
       "=b" (p[1]),
       "=c" (p[2]),
       "=d" (p[3])
     : "0" (ax), "2" (cx)
   );
#elif DETECT_CC_MSVC
   __cpuidex(p, ax, cx);
#else
   p[0] = 0;
   p[1] = 0;
   p[2] = 0;
   p[3] = 0;
#endif
}
348 
349 
/* Read XCR0 (the OS-enabled extended-state mask) via XGETBV with
 * %ecx = 0.  Returns 0 when the instruction cannot be emitted; callers
 * only invoke this after checking the OSXSAVE CPUID bit.
 */
static inline uint64_t xgetbv(void)
{
#if DETECT_CC_GCC
   uint32_t eax, edx;

   __asm __volatile (
     ".byte 0x0f, 0x01, 0xd0" // xgetbv isn't supported on gcc < 4.4
     : "=a"(eax),
       "=d"(edx)
     : "c"(0)
   );

   return ((uint64_t)edx << 32) | eax;
#elif DETECT_CC_MSVC && defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
   return _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
#else
   return 0;
#endif
}
369 
370 
#if DETECT_ARCH_X86
/* Report whether the CPU supports denormals-are-zero (DAZ) mode.
 *
 * FXSAVE writes a 512-byte image whose MXCSR_MASK field sits at byte
 * offset 28; bit 6 of that mask advertises DAZ support.  The save area
 * must be 16-byte aligned, hence alignas(16).
 */
UTIL_ALIGN_STACK
static inline bool
sse2_has_daz(void)
{
   alignas(16) struct {
      uint32_t pad1[7];
      uint32_t mxcsr_mask;   /* byte offset 28 within the FXSAVE image */
      uint32_t pad2[128-8];  /* pad the struct out to the full 512 bytes */
   } fxarea;

   fxarea.mxcsr_mask = 0;
#if DETECT_CC_GCC
   __asm __volatile ("fxsave %0" : "+m" (fxarea));
#elif DETECT_CC_MSVC || DETECT_CC_ICL
   _fxsave(&fxarea);
#else
   /* No FXSAVE available: the mask stays 0, so DAZ reads as absent. */
   fxarea.mxcsr_mask = 0;
#endif
   return !!(fxarea.mxcsr_mask & (1 << 6));
}
#endif
393 
394 #endif /* X86 or X86_64 */
395 
#if DETECT_ARCH_ARM
/* Detect NEON support on 32-bit ARM and set util_cpu_caps.has_neon. */
static void
check_os_arm_support(void)
{
   /*
    * On Android, the cpufeatures library is preferred way of checking
    * CPU capabilities. However, it is not available for standalone Mesa
    * builds, i.e. when Android build system (Android.mk-based) is not
    * used. Because of this we cannot use DETECT_OS_ANDROID here, but rather
    * have a separate macro that only gets enabled from respective Android.mk.
    */
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
   /* Compiled with NEON enabled: the target is guaranteed to have it. */
   util_cpu_caps.has_neon = 1;
#elif DETECT_OS_FREEBSD && defined(HAVE_ELF_AUX_INFO)
   unsigned long hwcap = 0;
   elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
   if (hwcap & HWCAP_NEON)
      util_cpu_caps.has_neon = 1;
#elif defined(HAS_ANDROID_CPUFEATURES)
   AndroidCpuFamily cpu_family = android_getCpuFamily();
   uint64_t cpu_features = android_getCpuFeatures();

   if (cpu_family == ANDROID_CPU_FAMILY_ARM) {
      if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)
         util_cpu_caps.has_neon = 1;
   }
#elif DETECT_OS_LINUX
    /* Scan the ELF auxiliary vector for the AT_HWCAP entry; bit 12
     * matches the HWCAP_NEON flag used in the FreeBSD branch above.
     */
    Elf32_auxv_t aux;
    int fd;

    fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
    if (fd >= 0) {
       while (read(fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) {
          if (aux.a_type == AT_HWCAP) {
             uint32_t hwcap = aux.a_un.a_val;

             util_cpu_caps.has_neon = (hwcap >> 12) & 1;
             break;
          }
       }
       close (fd);
    }
#endif /* DETECT_OS_LINUX */
}

#elif DETECT_ARCH_AARCH64
/* NEON (Advanced SIMD) is mandatory on AArch64. */
static void
check_os_arm_support(void)
{
    util_cpu_caps.has_neon = true;
}
#endif /* DETECT_ARCH_ARM || DETECT_ARCH_AARCH64 */
448 
#if DETECT_ARCH_MIPS64
/* Detect MIPS SIMD Architecture (MSA) support.
 *
 * On Linux this scans the ELF auxiliary vector for the AT_HWCAP entry;
 * bit 1 of that value reports MSA availability.
 */
static void
check_os_mips64_support(void)
{
#if DETECT_OS_LINUX
   int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd < 0)
      return;

   Elf64_auxv_t entry;
   while (read(fd, &entry, sizeof(entry)) == sizeof(entry)) {
      if (entry.a_type != AT_HWCAP)
         continue;

      util_cpu_caps.has_msa = (entry.a_un.a_val >> 1) & 1;
      break;
   }
   close(fd);
#endif /* DETECT_OS_LINUX */
}
#endif /* DETECT_ARCH_MIPS64 */
472 
#if DETECT_ARCH_LOONGARCH64
/* Detect the LSX/LASX vector extensions on LoongArch64.
 *
 * On Linux this scans the ELF auxiliary vector for the AT_HWCAP entry;
 * bits 4 and 5 report LSX and LASX availability respectively.
 */
static void
check_os_loongarch64_support(void)
{
#if DETECT_OS_LINUX
   int fd = open("/proc/self/auxv", O_RDONLY | O_CLOEXEC);
   if (fd < 0)
      return;

   Elf64_auxv_t entry;
   while (read(fd, &entry, sizeof(entry)) == sizeof(entry)) {
      if (entry.a_type != AT_HWCAP)
         continue;

      uint64_t hwcap = entry.a_un.a_val;
      util_cpu_caps.has_lsx = (hwcap >> 4) & 1;
      util_cpu_caps.has_lasx = (hwcap >> 5) & 1;
      break;
   }
   close(fd);
#endif /* DETECT_OS_LINUX */
}
#endif /* DETECT_ARCH_LOONGARCH64 */
497 
498 
499 static void
get_cpu_topology(void)500 get_cpu_topology(void)
501 {
502    /* Default. This is OK if L3 is not present or there is only one. */
503    util_cpu_caps.num_L3_caches = 1;
504 
505    memset(util_cpu_caps.cpu_to_L3, 0xff, sizeof(util_cpu_caps.cpu_to_L3));
506 
507 #if DETECT_OS_LINUX
508    uint64_t big_cap = 0;
509    unsigned num_big_cpus = 0;
510    uint64_t *caps = malloc(sizeof(uint64_t) * util_cpu_caps.max_cpus);
511    bool fail = false;
512    for (unsigned i = 0; caps && i < util_cpu_caps.max_cpus; i++) {
513       char name[PATH_MAX];
514       snprintf(name, sizeof(name), "/sys/devices/system/cpu/cpu%u/cpu_capacity", i);
515       size_t size = 0;
516       char *cap = os_read_file(name, &size);
517       if (!cap) {
518          num_big_cpus = 0;
519          fail = true;
520          break;
521       }
522       errno = 0;
523       caps[i] = strtoull(cap, NULL, 10);
524       free(cap);
525       if (errno) {
526          fail = true;
527          break;
528       }
529       big_cap = MAX2(caps[i], big_cap);
530    }
531    if (!fail) {
532       for (unsigned i = 0; caps && i < util_cpu_caps.max_cpus; i++) {
533          if (caps[i] >= big_cap / 2)
534             num_big_cpus++;
535       }
536    }
537    free(caps);
538    util_cpu_caps.nr_big_cpus = num_big_cpus;
539 #endif
540 
541 #if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
542    /* AMD Zen */
543    if (util_cpu_caps.family >= CPU_AMD_ZEN1_ZEN2 &&
544        util_cpu_caps.family < CPU_AMD_LAST) {
545       uint32_t regs[4];
546 
547       uint32_t saved_mask[UTIL_MAX_CPUS / 32] = {0};
548       uint32_t mask[UTIL_MAX_CPUS / 32] = {0};
549       bool saved = false;
550 
551       uint32_t L3_found[UTIL_MAX_CPUS] = {0};
552       uint32_t num_L3_caches = 0;
553       util_affinity_mask *L3_affinity_masks = NULL;
554 
555       /* Query APIC IDs from each CPU core.
556        *
557        * An APIC ID is a logical ID of the CPU with respect to the cache
558        * hierarchy, meaning that consecutive APIC IDs are neighbours in
559        * the hierarchy, e.g. sharing the same cache.
560        *
561        * For example, CPU 0 can have APIC ID 0 and CPU 12 can have APIC ID 1,
562        * which means that both CPU 0 and 12 are next to each other.
563        * (e.g. they are 2 threads belonging to 1 SMT2 core)
564        *
565        * We need to find out which CPUs share the same L3 cache and they can
566        * be all over the place.
567        *
568        * Querying the APIC ID can only be done by pinning the current thread
569        * to each core. The original affinity mask is saved.
570        *
571        * Loop over all possible CPUs even though some may be offline.
572        */
573       for (int16_t i = 0; i < util_cpu_caps.max_cpus && i < UTIL_MAX_CPUS; i++) {
574          uint32_t cpu_bit = 1u << (i % 32);
575 
576          mask[i / 32] = cpu_bit;
577 
578          /* The assumption is that trying to bind the thread to a CPU that is
579           * offline will fail.
580           */
581          if (util_set_current_thread_affinity(mask,
582                                               !saved ? saved_mask : NULL,
583                                               util_cpu_caps.num_cpu_mask_bits)) {
584             saved = true;
585 
586             /* Query the APIC ID of the current core. */
587             cpuid(0x00000001, regs);
588             unsigned apic_id = regs[1] >> 24;
589 
590             /* Query the total core count for the CPU */
591             uint32_t core_count = 1;
592             if (regs[3] & (1 << 28))
593                core_count = (regs[1] >> 16) & 0xff;
594 
595             core_count = util_next_power_of_two(core_count);
596 
597             /* Query the L3 cache count. */
598             cpuid_count(0x8000001D, 3, regs);
599             unsigned cache_level = (regs[0] >> 5) & 0x7;
600             unsigned cores_per_L3 = ((regs[0] >> 14) & 0xfff) + 1;
601 
602             if (cache_level != 3)
603                continue;
604 
605             unsigned local_core_id = apic_id & (core_count - 1);
606             unsigned phys_id = (apic_id & ~(core_count - 1)) >> util_logbase2(core_count);
607             unsigned local_l3_cache_index = local_core_id / util_next_power_of_two(cores_per_L3);
608 #define L3_ID(p, i) (p << 16 | i << 1 | 1);
609 
610             unsigned l3_id = L3_ID(phys_id, local_l3_cache_index);
611             int idx = -1;
612             for (unsigned c = 0; c < num_L3_caches; c++) {
613                if (L3_found[c] == l3_id) {
614                   idx = c;
615                   break;
616                }
617             }
618             if (idx == -1) {
619                idx = num_L3_caches;
620                L3_found[num_L3_caches++] = l3_id;
621                L3_affinity_masks = realloc(L3_affinity_masks, sizeof(util_affinity_mask) * num_L3_caches);
622                if (!L3_affinity_masks)
623                   return;
624                memset(&L3_affinity_masks[num_L3_caches - 1], 0, sizeof(util_affinity_mask));
625             }
626             util_cpu_caps.cpu_to_L3[i] = idx;
627             L3_affinity_masks[idx][i / 32] |= cpu_bit;
628 
629          }
630          mask[i / 32] = 0;
631       }
632 
633       util_cpu_caps.num_L3_caches = num_L3_caches;
634       util_cpu_caps.L3_affinity_mask = L3_affinity_masks;
635 
636       if (saved) {
637          if (debug_get_option_dump_cpu()) {
638             fprintf(stderr, "CPU <-> L3 cache mapping:\n");
639             for (unsigned i = 0; i < util_cpu_caps.num_L3_caches; i++) {
640                fprintf(stderr, "  - L3 %u mask = ", i);
641                for (int j = util_cpu_caps.max_cpus - 1; j >= 0; j -= 32)
642                   fprintf(stderr, "%08x ", util_cpu_caps.L3_affinity_mask[i][j / 32]);
643                fprintf(stderr, "\n");
644             }
645          }
646 
647          /* Restore the original affinity mask. */
648          util_set_current_thread_affinity(saved_mask, NULL,
649                                           util_cpu_caps.num_cpu_mask_bits);
650       } else {
651          if (debug_get_option_dump_cpu())
652             fprintf(stderr, "Cannot set thread affinity for any thread.\n");
653       }
654    }
655 #endif
656 }
657 
/* Apply user overrides to the detected CPU capabilities.
 *
 * GALLIUM_NOSSE / LP_FORCE_SSE2 (debug builds) disable individual
 * features; GALLIUM_OVERRIDE_CPU_CAPS names the highest feature level to
 * keep ("sse2" keeps SSE2 but drops SSE3 and everything above it).
 * Afterwards, disables are propagated down the feature dependency chain.
 */
static
void check_cpu_caps_override(void)
{
   const char *cap_level = debug_get_option("GALLIUM_OVERRIDE_CPU_CAPS", NULL);
#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   if (debug_get_bool_option("GALLIUM_NOSSE", false)) {
      util_cpu_caps.has_sse = 0;
   }
#if MESA_DEBUG
   /* For simulating less capable machines */
   if (debug_get_bool_option("LP_FORCE_SSE2", false)) {
      util_cpu_caps.has_sse3 = 0;
   }
#endif
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */

   if (cap_level != NULL) {
#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
      /* Clear the first feature above the requested level; the dependency
       * cascade below takes care of everything built on top of it.
       */
      if (strcmp(cap_level, "nosse") == 0) {
         util_cpu_caps.has_sse = 0;
      } else if (strcmp(cap_level, "sse") == 0) {
         util_cpu_caps.has_sse2 = 0;
      } else if (strcmp(cap_level, "sse2") == 0) {
         util_cpu_caps.has_sse3 = 0;
      } else if (strcmp(cap_level, "sse3") == 0) {
         util_cpu_caps.has_ssse3 = 0;
      } else if (strcmp(cap_level, "ssse3") == 0) {
         util_cpu_caps.has_sse4_1 = 0;
      } else if (strcmp(cap_level, "sse4.1") == 0) {
         util_cpu_caps.has_avx = 0;
      } else if (strcmp(cap_level, "avx") == 0) {
         util_cpu_caps.has_avx512f = 0;
      }
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */
   }

#if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
   /* Propagate disables down the dependency chain: each feature requires
    * the one before it.
    */
   util_cpu_caps.has_sse2   &= util_cpu_caps.has_sse;
   util_cpu_caps.has_sse3   &= util_cpu_caps.has_sse2;
   util_cpu_caps.has_ssse3  &= util_cpu_caps.has_sse3;
   util_cpu_caps.has_sse4_1 &= util_cpu_caps.has_ssse3;
   if (!util_cpu_caps.has_sse4_1) {
      util_cpu_caps.has_sse4_2 = 0;
      util_cpu_caps.has_avx = 0;
   }
   if (!util_cpu_caps.has_avx) {
      util_cpu_caps.has_avx2 = 0;
      util_cpu_caps.has_f16c = 0;
      util_cpu_caps.has_fma = 0;
      util_cpu_caps.has_avx512f = 0;
   }
   if (!util_cpu_caps.has_avx512f) {
      /* All AVX-512 extensions depend on the foundation bit. */
      util_cpu_caps.has_avx512dq   = 0;
      util_cpu_caps.has_avx512ifma = 0;
      util_cpu_caps.has_avx512pf   = 0;
      util_cpu_caps.has_avx512er   = 0;
      util_cpu_caps.has_avx512cd   = 0;
      util_cpu_caps.has_avx512bw   = 0;
      util_cpu_caps.has_avx512vl   = 0;
      util_cpu_caps.has_avx512vbmi = 0;
   }
#endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */
}
730 
731 static
check_max_vector_bits(void)732 void check_max_vector_bits(void)
733 {
734    /* Leave it at 128, even when no SIMD extensions are available.
735     * Really needs to be a multiple of 128 so can fit 4 floats.
736     */
737    util_cpu_caps.max_vector_bits = 128;
738 #if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
739    if (util_cpu_caps.has_avx512f) {
740       util_cpu_caps.max_vector_bits = 512;
741    } else if (util_cpu_caps.has_avx) {
742       util_cpu_caps.max_vector_bits = 256;
743    }
744 #endif
745 }
746 
747 void _util_cpu_detect_once(void);
748 
749 void
_util_cpu_detect_once(void)750 _util_cpu_detect_once(void)
751 {
752    int available_cpus = 0;
753    int total_cpus = 0;
754 
755    memset(&util_cpu_caps, 0, sizeof util_cpu_caps);
756 
757    /* Count the number of CPUs in system */
758 #if DETECT_OS_WINDOWS
759    {
760       SYSTEM_INFO system_info;
761       GetSystemInfo(&system_info);
762       available_cpus = MAX2(1, system_info.dwNumberOfProcessors);
763    }
764 #elif DETECT_OS_POSIX
765 #  if defined(HAS_SCHED_GETAFFINITY)
766    {
767       /* sched_setaffinity() can be used to further restrict the number of
768        * CPUs on which the process can run.  Use sched_getaffinity() to
769        * determine the true number of available CPUs.
770        *
771        * FIXME: The Linux manual page for sched_getaffinity describes how this
772        * simple implementation will fail with > 1024 CPUs, and we'll fall back
773        * to the _SC_NPROCESSORS_ONLN path.  Support for > 1024 CPUs can be
774        * added to this path once someone has such a system for testing.
775        */
776       cpu_set_t affin;
777       if (sched_getaffinity(getpid(), sizeof(affin), &affin) == 0)
778          available_cpus = CPU_COUNT(&affin);
779    }
780 #  endif
781 
782    /* Linux, FreeBSD, DragonFly, and Mac OS X should have
783     * _SC_NPROCESSORS_ONLN.  NetBSD and OpenBSD should have HW_NCPUONLINE.
784     * This is what FFmpeg uses on those platforms.
785     */
786 #  if DETECT_OS_BSD && defined(HW_NCPUONLINE)
787    if (available_cpus == 0) {
788       const int mib[] = { CTL_HW, HW_NCPUONLINE };
789       int ncpu;
790       size_t len = sizeof(ncpu);
791 
792       sysctl(mib, 2, &ncpu, &len, NULL, 0);
793       available_cpus = ncpu;
794    }
795 #  elif defined(_SC_NPROCESSORS_ONLN)
796    if (available_cpus == 0) {
797       available_cpus = sysconf(_SC_NPROCESSORS_ONLN);
798       if (available_cpus == ~0)
799          available_cpus = 1;
800    }
801 #  elif DETECT_OS_BSD
802    if (available_cpus == 0) {
803       const int mib[] = { CTL_HW, HW_NCPU };
804       int ncpu;
805       int len = sizeof(ncpu);
806 
807       sysctl(mib, 2, &ncpu, &len, NULL, 0);
808       available_cpus = ncpu;
809    }
810 #  endif /* DETECT_OS_BSD */
811 
812    /* Determine the maximum number of CPUs configured in the system.  This is
813     * used to properly set num_cpu_mask_bits below.  On BSDs that don't have
814     * HW_NCPUONLINE, it was not clear whether HW_NCPU is the number of
815     * configured or the number of online CPUs.  For that reason, prefer the
816     * _SC_NPROCESSORS_CONF path on all BSDs.
817     */
818 #  if defined(_SC_NPROCESSORS_CONF)
819    total_cpus = sysconf(_SC_NPROCESSORS_CONF);
820    if (total_cpus == ~0)
821       total_cpus = 1;
822 #  elif DETECT_OS_BSD
823    {
824       const int mib[] = { CTL_HW, HW_NCPU };
825       int ncpu;
826       int len = sizeof(ncpu);
827 
828       sysctl(mib, 2, &ncpu, &len, NULL, 0);
829       total_cpus = ncpu;
830    }
831 #  endif /* DETECT_OS_BSD */
832 #endif /* DETECT_OS_POSIX */
833 
834    util_cpu_caps.nr_cpus = MAX2(1, available_cpus);
835    total_cpus = MAX2(total_cpus, util_cpu_caps.nr_cpus);
836 
837    util_cpu_caps.max_cpus = total_cpus;
838    util_cpu_caps.num_cpu_mask_bits = align(total_cpus, 32);
839 
840    /* Make the fallback cacheline size nonzero so that it can be
841     * safely passed to align().
842     */
843    util_cpu_caps.cacheline = sizeof(void *);
844 
845 #if DETECT_ARCH_X86 || DETECT_ARCH_X86_64
846    if (has_cpuid()) {
847       uint32_t regs[4];
848       uint32_t regs2[4];
849 
850       util_cpu_caps.cacheline = 32;
851 
852       /* Get max cpuid level */
853       cpuid(0x00000000, regs);
854 
855       if (regs[0] >= 0x00000001) {
856          unsigned int cacheline;
857 
858          cpuid (0x00000001, regs2);
859 
860          util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf;
861          /* Add "extended family". */
862          if (util_cpu_caps.x86_cpu_type == 0xf)
863              util_cpu_caps.x86_cpu_type += ((regs2[0] >> 20) & 0xff);
864 
865          switch (util_cpu_caps.x86_cpu_type) {
866          case 0x17:
867             util_cpu_caps.family = CPU_AMD_ZEN1_ZEN2;
868             break;
869          case 0x18:
870             util_cpu_caps.family = CPU_AMD_ZEN_HYGON;
871             break;
872          case 0x19:
873             util_cpu_caps.family = CPU_AMD_ZEN3;
874             break;
875          default:
876             if (util_cpu_caps.x86_cpu_type > 0x19)
877                util_cpu_caps.family = CPU_AMD_ZEN_NEXT;
878          }
879 
880          /* general feature flags */
881          util_cpu_caps.has_mmx    = (regs2[3] >> 23) & 1; /* 0x0800000 */
882          util_cpu_caps.has_sse    = (regs2[3] >> 25) & 1; /* 0x2000000 */
883          util_cpu_caps.has_sse2   = (regs2[3] >> 26) & 1; /* 0x4000000 */
884          util_cpu_caps.has_sse3   = (regs2[2] >>  0) & 1; /* 0x0000001 */
885          util_cpu_caps.has_ssse3  = (regs2[2] >>  9) & 1; /* 0x0000020 */
886          util_cpu_caps.has_sse4_1 = (regs2[2] >> 19) & 1;
887          util_cpu_caps.has_sse4_2 = (regs2[2] >> 20) & 1;
888          util_cpu_caps.has_popcnt = (regs2[2] >> 23) & 1;
889          util_cpu_caps.has_avx    = ((regs2[2] >> 28) & 1) && // AVX
890                                     ((regs2[2] >> 27) & 1) && // OSXSAVE
891                                     ((xgetbv() & 6) == 6);    // XMM & YMM
892          util_cpu_caps.has_f16c   = ((regs2[2] >> 29) & 1) && util_cpu_caps.has_avx;
893          util_cpu_caps.has_fma    = ((regs2[2] >> 12) & 1) && util_cpu_caps.has_avx;
894          util_cpu_caps.has_mmx2   = util_cpu_caps.has_sse; /* SSE cpus supports mmxext too */
895 #if DETECT_ARCH_X86_64
896          util_cpu_caps.has_daz = 1;
897 #else
898          util_cpu_caps.has_daz = util_cpu_caps.has_sse3 ||
899             (util_cpu_caps.has_sse2 && sse2_has_daz());
900 #endif
901 
902          cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
903          if (cacheline > 0)
904             util_cpu_caps.cacheline = cacheline;
905       }
906       if (regs[0] >= 0x00000007) {
907          uint32_t regs7[4];
908          cpuid_count(0x00000007, 0x00000000, regs7);
909          util_cpu_caps.has_clflushopt = (regs7[1] >> 23) & 1;
910          if (util_cpu_caps.has_avx) {
911             util_cpu_caps.has_avx2 = (regs7[1] >> 5) & 1;
912 
913             // check for avx512
914             if (xgetbv() & (0x7 << 5)) { // OPMASK: upper-256 enabled by OS
915                util_cpu_caps.has_avx512f    = (regs7[1] >> 16) & 1;
916                util_cpu_caps.has_avx512dq   = (regs7[1] >> 17) & 1;
917                util_cpu_caps.has_avx512ifma = (regs7[1] >> 21) & 1;
918                util_cpu_caps.has_avx512pf   = (regs7[1] >> 26) & 1;
919                util_cpu_caps.has_avx512er   = (regs7[1] >> 27) & 1;
920                util_cpu_caps.has_avx512cd   = (regs7[1] >> 28) & 1;
921                util_cpu_caps.has_avx512bw   = (regs7[1] >> 30) & 1;
922                util_cpu_caps.has_avx512vl   = (regs7[1] >> 31) & 1;
923                util_cpu_caps.has_avx512vbmi = (regs7[2] >>  1) & 1;
924             }
925          }
926       }
927 
928       if (regs[1] == 0x756e6547 && regs[2] == 0x6c65746e && regs[3] == 0x49656e69) {
929          /* GenuineIntel */
930          util_cpu_caps.has_intel = 1;
931       }
932 
933       cpuid(0x80000000, regs);
934 
935       if (regs[0] >= 0x80000001) {
936 
937          cpuid(0x80000001, regs2);
938 
939          util_cpu_caps.has_mmx  |= (regs2[3] >> 23) & 1;
940          util_cpu_caps.has_mmx2 |= (regs2[3] >> 22) & 1;
941          util_cpu_caps.has_3dnow = (regs2[3] >> 31) & 1;
942          util_cpu_caps.has_3dnow_ext = (regs2[3] >> 30) & 1;
943 
944          util_cpu_caps.has_xop = util_cpu_caps.has_avx &&
945                                  ((regs2[2] >> 11) & 1);
946       }
947 
948       if (regs[0] >= 0x80000006) {
949          /* should we really do this if the clflush size above worked? */
950          unsigned int cacheline;
951          cpuid(0x80000006, regs2);
952          cacheline = regs2[2] & 0xFF;
953          if (cacheline > 0)
954             util_cpu_caps.cacheline = cacheline;
955       }
956    }
957 #endif /* DETECT_ARCH_X86 || DETECT_ARCH_X86_64 */
958 
959 #if DETECT_ARCH_ARM || DETECT_ARCH_AARCH64
960    check_os_arm_support();
961 #endif
962 
963 #if DETECT_ARCH_PPC
964    check_os_altivec_support();
965 #endif /* DETECT_ARCH_PPC */
966 
967 #if DETECT_ARCH_MIPS64
968    check_os_mips64_support();
969 #endif /* DETECT_ARCH_MIPS64 */
970 
971 #if DETECT_ARCH_LOONGARCH64
972    check_os_loongarch64_support();
973 #endif /* DETECT_ARCH_LOONGARCH64 */
974 
975 #if DETECT_ARCH_S390
976    util_cpu_caps.family = CPU_S390X;
977 #endif
978 
979    check_cpu_caps_override();
980 
981    /* max_vector_bits should be checked after cpu caps override */
982    check_max_vector_bits();
983 
984    get_cpu_topology();
985 
986    if (debug_get_option_dump_cpu()) {
987       printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus);
988 
989       printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type);
990       printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline);
991 
992       printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx);
993       printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2);
994       printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse);
995       printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2);
996       printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3);
997       printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3);
998       printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1);
999       printf("util_cpu_caps.has_sse4_2 = %u\n", util_cpu_caps.has_sse4_2);
1000       printf("util_cpu_caps.has_avx = %u\n", util_cpu_caps.has_avx);
1001       printf("util_cpu_caps.has_avx2 = %u\n", util_cpu_caps.has_avx2);
1002       printf("util_cpu_caps.has_f16c = %u\n", util_cpu_caps.has_f16c);
1003       printf("util_cpu_caps.has_popcnt = %u\n", util_cpu_caps.has_popcnt);
1004       printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow);
1005       printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext);
1006       printf("util_cpu_caps.has_xop = %u\n", util_cpu_caps.has_xop);
1007       printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec);
1008       printf("util_cpu_caps.has_vsx = %u\n", util_cpu_caps.has_vsx);
1009       printf("util_cpu_caps.has_neon = %u\n", util_cpu_caps.has_neon);
1010       printf("util_cpu_caps.has_msa = %u\n", util_cpu_caps.has_msa);
1011       printf("util_cpu_caps.has_daz = %u\n", util_cpu_caps.has_daz);
1012       printf("util_cpu_caps.has_lsx = %u\n", util_cpu_caps.has_lsx);
1013       printf("util_cpu_caps.has_lasx = %u\n", util_cpu_caps.has_lasx);
1014       printf("util_cpu_caps.has_avx512f = %u\n", util_cpu_caps.has_avx512f);
1015       printf("util_cpu_caps.has_avx512dq = %u\n", util_cpu_caps.has_avx512dq);
1016       printf("util_cpu_caps.has_avx512ifma = %u\n", util_cpu_caps.has_avx512ifma);
1017       printf("util_cpu_caps.has_avx512pf = %u\n", util_cpu_caps.has_avx512pf);
1018       printf("util_cpu_caps.has_avx512er = %u\n", util_cpu_caps.has_avx512er);
1019       printf("util_cpu_caps.has_avx512cd = %u\n", util_cpu_caps.has_avx512cd);
1020       printf("util_cpu_caps.has_avx512bw = %u\n", util_cpu_caps.has_avx512bw);
1021       printf("util_cpu_caps.has_avx512vl = %u\n", util_cpu_caps.has_avx512vl);
1022       printf("util_cpu_caps.has_avx512vbmi = %u\n", util_cpu_caps.has_avx512vbmi);
1023       printf("util_cpu_caps.has_clflushopt = %u\n", util_cpu_caps.has_clflushopt);
1024       printf("util_cpu_caps.num_L3_caches = %u\n", util_cpu_caps.num_L3_caches);
1025       printf("util_cpu_caps.num_cpu_mask_bits = %u\n", util_cpu_caps.num_cpu_mask_bits);
1026    }
1027    _util_cpu_caps_state.caps = util_cpu_caps;
1028 
1029    /* This must happen at the end as it's used to guard everything else */
1030    p_atomic_set(&_util_cpu_caps_state.detect_done, 1);
1031 }
1032