1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "build/build_config.h"
6 #include "partition_alloc/partition_alloc_base/cpu.h"
7 #include "testing/gtest/include/gtest/gtest.h"
8 
9 namespace partition_alloc {
10 
// Tests whether we can run extended instructions represented by the CPU
// information. This test actually executes some extended instructions (such as
// MMX, SSE, etc.) supported by the CPU and verifies that we can run them
// without "undefined instruction" exceptions. That is, this test succeeds if
// it finishes without a crash.
TEST(CPUPA, RunExtendedInstructions) {
  // Retrieve the CPU information (feature flags detected at runtime).
  internal::base::CPU cpu;
#if defined(ARCH_CPU_X86_FAMILY)

  // MMX/SSE/SSE2/SSE3 are part of the x86 baseline, so they must always be
  // reported as available; the instructions below are executed
  // unconditionally.
  ASSERT_TRUE(cpu.has_mmx());
  ASSERT_TRUE(cpu.has_sse());
  ASSERT_TRUE(cpu.has_sse2());
  ASSERT_TRUE(cpu.has_sse3());

// GCC and clang instruction test.
#if defined(COMPILER_GCC)
  // Execute an MMX instruction.
  __asm__ __volatile__("emms\n" : : : "mm0");

  // Execute an SSE instruction.
  __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");

  // Execute an SSE 2 instruction.
  __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");

  // Execute an SSE 3 instruction.
  __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");

  if (cpu.has_ssse3()) {
    // Execute a Supplemental SSE 3 instruction.
    __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_sse41()) {
    // Execute an SSE 4.1 instruction.
    __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_sse42()) {
    // Execute an SSE 4.2 instruction.
    __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
  }

  if (cpu.has_popcnt()) {
    // Execute a POPCNT instruction.
    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
  }

  if (cpu.has_avx()) {
    // Execute an AVX instruction.
    __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
  }

  if (cpu.has_fma3()) {
    // Execute a FMA3 instruction.
    __asm__ __volatile__("vfmadd132ps %%xmm0, %%xmm0, %%xmm0\n" : : : "xmm0");
  }

  if (cpu.has_avx2()) {
    // Execute an AVX 2 instruction.
    __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
  }

  if (cpu.has_pku()) {
    // Execute RDPKRU, emitted as its raw encoding (0f 01 ee) in case the
    // assembler does not know the mnemonic. The "c"(0)/"d"(0) inputs satisfy
    // the instruction's requirement that ECX be zero; the PKRU value is
    // returned in EAX.
    uint32_t pkru;
    __asm__ __volatile__(".byte 0x0f,0x01,0xee\n"
                         : "=a"(pkru)
                         : "c"(0), "d"(0));
  }
// Visual C 32 bit and ClangCL 32/64 bit test.
#elif defined(COMPILER_MSVC) &&   \
    (defined(ARCH_CPU_32_BITS) || \
     (defined(ARCH_CPU_64_BITS) && defined(__clang__)))

  // Execute an MMX instruction.
  __asm emms;

  // Execute an SSE instruction.
  __asm xorps xmm0, xmm0;

  // Execute an SSE 2 instruction.
  __asm psrldq xmm0, 0;

  // Execute an SSE 3 instruction.
  __asm addsubpd xmm0, xmm0;

  if (cpu.has_ssse3()) {
    // Execute a Supplemental SSE 3 instruction.
    __asm psignb xmm0, xmm0;
  }

  if (cpu.has_sse41()) {
    // Execute an SSE 4.1 instruction.
    __asm pmuldq xmm0, xmm0;
  }

  if (cpu.has_sse42()) {
    // Execute an SSE 4.2 instruction.
    __asm crc32 eax, eax;
  }

  if (cpu.has_popcnt()) {
    // Execute a POPCNT instruction.
    __asm popcnt eax, eax;
  }

  if (cpu.has_avx()) {
    // Execute an AVX instruction.
    __asm vzeroupper;
  }

  if (cpu.has_fma3()) {
    // Execute a FMA3 instruction.
    __asm vfmadd132ps xmm0, xmm0, xmm0;
  }

  if (cpu.has_avx2()) {
    // Execute an AVX 2 instruction.
    __asm vpunpcklbw ymm0, ymm0, ymm0
  }
#endif  // defined(COMPILER_GCC)
#endif  // defined(ARCH_CPU_X86_FAMILY)

#if defined(ARCH_CPU_ARM64)
  // Check that the CPU is correctly reporting support for the Armv8.5-A memory
  // tagging extension. The new MTE instructions aren't encoded in NOP space
  // like BTI/Pointer Authentication and will crash older cores with a SIGILL if
  // used incorrectly. This test demonstrates how it should be done and that
  // this approach works.
  if (cpu.has_mte()) {
#if !defined(__ARM_FEATURE_MEMORY_TAGGING)
    // In this section, we're running on an MTE-compatible core, but we're
    // building this file without MTE support. Fail this test to indicate that
    // there's a problem with the base/ build configuration.
    GTEST_FAIL()
        << "MTE support detected (but base/ built without MTE support)";
#else
    char ptr[32];
    uint64_t val;
    // Execute a trivial MTE instruction. Normally, MTE should be used via the
    // intrinsics documented at
    // https://developer.arm.com/documentation/101028/0012/10--Memory-tagging-intrinsics,
    // this test uses the irg (Insert Random Tag) instruction directly to make
    // sure that it's not optimized out by the compiler.
    __asm__ __volatile__("irg %0, %1" : "=r"(val) : "r"(ptr));
#endif  // __ARM_FEATURE_MEMORY_TAGGING
  }
#endif  // ARCH_CPU_ARM64
}
162 
163 }  // namespace partition_alloc
164