// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/page_allocator.h"

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>

#include "build/build_config.h"
#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/cpu.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/tagging.h"

#if defined(LINUX_NAME_REGION)
#include "base/debug/proc_maps_linux.h"
#endif

#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_POSIX)
#include <sys/mman.h>
#include <sys/time.h>

#include <csetjmp>
#include <csignal>
#endif  // BUILDFLAG(IS_POSIX)

#include "partition_alloc/arm_bti_test_functions.h"

#if BUILDFLAG(HAS_MEMORY_TAGGING)
#include <arm_acle.h>
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#define MTE_KILLED_BY_SIGNAL_AVAILABLE
#endif
#endif  // BUILDFLAG(HAS_MEMORY_TAGGING)

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

namespace partition_alloc::internal {

namespace {

// Any number of bytes that can be allocated with no trouble.
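// (1 MiB, masked down to an allocation-granularity boundary.)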
size_t EasyAllocSize() {
  return (1024 * 1024) & ~(PageAllocationGranularity() - 1);
}

// A huge amount of memory, greater than or equal to the ASLR space.
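// The std::max() below is presumably a guard against wraparound: if doubling
// ASLRMask() overflows size_t, the result is smaller than ASLRMask() itself,
// and the max falls back to ASLRMask().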
size_t HugeMemoryAmount() {
  return std::max(::partition_alloc::internal::ASLRMask(),
                  std::size_t{2} * ::partition_alloc::internal::ASLRMask());
}

}  // namespace

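// Checks the Round{Up,Down}To* helpers at and around system-page and
// allocation-granularity boundaries.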
TEST(PartitionAllocPageAllocatorTest, Rounding) {
  EXPECT_EQ(0u, RoundUpToSystemPage(0u));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(1));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize() - 1));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize()));
  EXPECT_EQ(2 * SystemPageSize(), RoundUpToSystemPage(SystemPageSize() + 1));
  EXPECT_EQ(0u, RoundDownToSystemPage(0u));
  EXPECT_EQ(0u, RoundDownToSystemPage(SystemPageSize() - 1));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize()));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize() + 1));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(2 * SystemPageSize() - 1));
  EXPECT_EQ(0u, RoundUpToPageAllocationGranularity(0u));
  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(1));
  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(
                                             PageAllocationGranularity() - 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundUpToPageAllocationGranularity(PageAllocationGranularity()));
  EXPECT_EQ(
      2 * PageAllocationGranularity(),
      RoundUpToPageAllocationGranularity(PageAllocationGranularity() + 1));
  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(0u));
  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(
                    PageAllocationGranularity() - 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(PageAllocationGranularity()));
  EXPECT_EQ(PageAllocationGranularity(), RoundDownToPageAllocationGranularity(
                                             PageAllocationGranularity() + 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(
                2 * PageAllocationGranularity() - 1));
}

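// Judging by the expectations below, NextAlignedWithOffset(address, alignment,
// offset) appears to return the smallest value >= |address| whose remainder
// modulo |alignment| equals |offset|.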
TEST(PartitionAllocPageAllocatorTest, NextAlignedWithOffset) {
  EXPECT_EQ(1024u, NextAlignedWithOffset(1024, 1, 0));
  EXPECT_EQ(2024u, NextAlignedWithOffset(1024, 1024, 1000));
  EXPECT_EQ(2024u, NextAlignedWithOffset(2024, 1024, 1000));
  EXPECT_EQ(3048u, NextAlignedWithOffset(2025, 1024, 1000));
  EXPECT_EQ(2048u, NextAlignedWithOffset(1024, 2048, 0));
  EXPECT_EQ(2148u, NextAlignedWithOffset(1024, 2048, 100));
  EXPECT_EQ(2000u, NextAlignedWithOffset(1024, 2048, 2000));
}

// Test that failed page allocations invoke base::ReleaseReservation().
// We detect this by making a reservation and ensuring that after failure, we
// can make a new reservation.
TEST(PartitionAllocPageAllocatorTest, AllocFailure) {
  // Release any reservation made by another test.
  ReleaseReservation();

  // We can make a reservation.
  EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));

  // We can't make another reservation until we trigger an allocation failure.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));

  size_t size = HugeMemoryAmount();
  // Skip the test for sanitizers and platforms with ASLR turned off.
  if (size == 0) {
    return;
  }

  uintptr_t result =
      AllocPages(size, PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kChromium);
  if (!result) {
    // We triggered allocation failure. Our reservation should have been
    // released, and we should be able to make a new reservation.
    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
    ReleaseReservation();
    return;
  }
  // We couldn't fail. Make sure reservation is still there.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
}

// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
#else
#define MAYBE_ReserveAddressSpace ReserveAddressSpace
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)

// Test that reserving address space can fail.
TEST(PartitionAllocPageAllocatorTest, MAYBE_ReserveAddressSpace) {
  // Release any reservation made by another test.
  ReleaseReservation();

  size_t size = HugeMemoryAmount();
  // Skip the test for sanitizers and platforms with ASLR turned off.
  if (size == 0) {
    return;
  }

  bool success = ReserveAddressSpace(size);
  if (!success) {
    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
    return;
  }
  // We couldn't fail. Make sure reservation is still there.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
}

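// Smoke test: a freshly allocated read-write page can be written and read
// back before being freed.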
TEST(PartitionAllocPageAllocatorTest, AllocAndFreePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWrite),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  *buffer0 = 42;
  EXPECT_EQ(42, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}

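// Walks AllocPagesWithAlignOffset() through a grid of sizes and offsets,
// checking that every returned address satisfies buffer % alignment == offset.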
TEST(PartitionAllocPageAllocatorTest, AllocPagesAligned) {
  size_t alignment = 8 * PageAllocationGranularity();
  size_t sizes[] = {PageAllocationGranularity(),
                    alignment - PageAllocationGranularity(), alignment,
                    alignment + PageAllocationGranularity(), alignment * 4};
  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
                      alignment - PageAllocationGranularity()};
  for (size_t size : sizes) {
    for (size_t offset : offsets) {
      uintptr_t buffer = AllocPagesWithAlignOffset(
          0, size, alignment, offset,
          PageAccessibilityConfiguration(
              PageAccessibilityConfiguration::kReadWrite),
          PageTag::kChromium);
      EXPECT_TRUE(buffer);
      EXPECT_EQ(buffer % alignment, offset);
      FreePages(buffer, size);
    }
  }
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTagged) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged is safe to use on all
  // systems (even those which don't support MTE).
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteTagged),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  *buffer0 = 42;
  EXPECT_EQ(42, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadExecuteConfirmCFI) {
  // This test checks that indirect branches to anything other than a valid
  // branch target in a PageAccessibilityConfiguration::kReadExecute-mapped
  // page crash on systems which support the Armv8.5 Branch Target
  // Identification extension.
  base::CPU cpu;
  if (!cpu.has_bti()) {
#if BUILDFLAG(IS_IOS)
    // Workaround for incorrectly failed iOS tests with GTEST_SKIP,
    // see crbug.com/912138 for details.
    return;
#else
    GTEST_SKIP();
#endif
  }
#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  // Next, map some read-write memory and copy the BTI-enabled function there.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWrite),
                 PageTag::kChromium);
  ptrdiff_t function_range =
      reinterpret_cast<char*>(arm_bti_test_function_end) -
      reinterpret_cast<char*>(arm_bti_test_function);
  ptrdiff_t invalid_offset =
      reinterpret_cast<char*>(arm_bti_test_function_invalid_offset) -
      reinterpret_cast<char*>(arm_bti_test_function);
  memcpy(reinterpret_cast<void*>(buffer),
         reinterpret_cast<void*>(arm_bti_test_function), function_range);

  // Next re-protect the page.
  SetSystemPagesAccess(
      buffer, PageAllocationGranularity(),
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadExecuteProtected));

  using BTITestFunction = int64_t (*)(int64_t);

  // Attempt to call the function through the BTI-enabled entrypoint. Confirm
  // that it works.
  BTITestFunction bti_enabled_fn = reinterpret_cast<BTITestFunction>(buffer);
  BTITestFunction bti_invalid_fn =
      reinterpret_cast<BTITestFunction>(buffer + invalid_offset);
  EXPECT_EQ(bti_enabled_fn(15), 18);
  // Next, attempt to call the function without the entrypoint.
  EXPECT_EXIT({ bti_invalid_fn(15); }, testing::KilledBySignal(SIGILL),
              "");  // Should crash with SIGILL.
  FreePages(buffer, PageAllocationGranularity());
#else
  PA_NOTREACHED();
#endif
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTaggedSynchronous) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
  // if allocated on a system which supports the
  // Armv8.5 Memory Tagging Extension.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // Skip this test if there's no MTE.
#if BUILDFLAG(IS_IOS)
    return;
#else
    GTEST_SKIP();
#endif
  }

#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteTagged),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  // Assign a 0x1 tag to the first granule of |buffer|.
  int* buffer1 = __arm_mte_increment_tag(buffer0, 0x1);
  EXPECT_NE(buffer0, buffer1);
  __arm_mte_set_tag(buffer1);
  // Retrieve the tag to ensure that it's set.
  buffer1 = __arm_mte_get_tag(buffer0);
  // Prove that the tag is different (if they're the same, the test won't
  // work).
  ASSERT_NE(buffer0, buffer1);
  TagViolationReportingMode parent_tagging_mode =
      GetMemoryTaggingModeForCurrentThread();
  EXPECT_EXIT(
      {
        // Switch to synchronous mode.
#if BUILDFLAG(IS_ANDROID)
        bool success = ChangeMemoryTaggingModeForAllThreadsPerProcess(
            TagViolationReportingMode::kSynchronous);
        EXPECT_TRUE(success);
#else
        ChangeMemoryTaggingModeForCurrentThread(
            TagViolationReportingMode::kSynchronous);
#endif  // BUILDFLAG(IS_ANDROID)
        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
                  TagViolationReportingMode::kSynchronous);
        // Write to the buffer using its previous tag. A segmentation fault
        // should be delivered.
        *buffer0 = 42;
      },
      testing::KilledBySignal(SIGSEGV), "");
  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
  FreePages(buffer, PageAllocationGranularity());
#else
  PA_NOTREACHED();
#endif
}

TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTaggedAsynchronous) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
  // if allocated on a system which supports MTE.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // Skip this test if there's no MTE.
#if BUILDFLAG(IS_IOS)
    return;
#else
    GTEST_SKIP();
#endif
  }

#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteTagged),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  __arm_mte_set_tag(__arm_mte_increment_tag(buffer0, 0x1));
  int* buffer1 = __arm_mte_get_tag(buffer0);
  EXPECT_NE(buffer0, buffer1);
  TagViolationReportingMode parent_tagging_mode =
      GetMemoryTaggingModeForCurrentThread();
  EXPECT_EXIT(
      {
        // Switch to asynchronous mode.
#if BUILDFLAG(IS_ANDROID)
        bool success = ChangeMemoryTaggingModeForAllThreadsPerProcess(
            TagViolationReportingMode::kAsynchronous);
        EXPECT_TRUE(success);
#else
        ChangeMemoryTaggingModeForCurrentThread(
            TagViolationReportingMode::kAsynchronous);
#endif  // BUILDFLAG(IS_ANDROID)
        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
                  TagViolationReportingMode::kAsynchronous);
        // Write to the buffer using its previous tag. A fault should be
        // generated at this point but we may not notice straight away...
        *buffer0 = 42;
        EXPECT_EQ(42, *buffer0);
        PA_LOG(ERROR) << "=";  // Until we receive control back from the kernel
                               // (e.g. on a system call).
      },
      testing::KilledBySignal(SIGSEGV), "");
  FreePages(buffer, PageAllocationGranularity());
  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
#else
  PA_NOTREACHED();
#endif
}

// Test permission setting on POSIX, where we can set a trap handler.
#if BUILDFLAG(IS_POSIX)

namespace {
sigjmp_buf g_continuation;

void SignalHandler(int signal, siginfo_t* info, void*) {
  siglongjmp(g_continuation, 1);
}
}  // namespace

// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
#if BUILDFLAG(IS_APPLE)
#define EXTRA_FAULT_BEGIN_ACTION() \
  struct sigaction old_bus_action; \
  sigaction(SIGBUS, &action, &old_bus_action);
#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
#else
#define EXTRA_FAULT_BEGIN_ACTION()
#define EXTRA_FAULT_END_ACTION()
#endif

// Install a signal handler so we can catch the fault we're about to trigger.
#define FAULT_TEST_BEGIN()                  \
  struct sigaction action = {};             \
  struct sigaction old_action = {};         \
  action.sa_sigaction = SignalHandler;      \
  sigemptyset(&action.sa_mask);             \
  action.sa_flags = SA_SIGINFO;             \
  sigaction(SIGSEGV, &action, &old_action); \
  EXTRA_FAULT_BEGIN_ACTION();               \
  int const save_sigs = 1;                  \
  if (!sigsetjmp(g_continuation, save_sigs)) {
// Fault generating code goes here...

// Handle when sigsetjmp returns nonzero (we are returning from our handler).
#define FAULT_TEST_END()                      \
  }                                           \
  else {                                      \
    sigaction(SIGSEGV, &old_action, nullptr); \
    EXTRA_FAULT_END_ACTION();                 \
  }
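// Usage sketch (this is how the tests below use the macros):
//   FAULT_TEST_BEGIN()
//   ... code expected to fault ...
//   EXPECT_TRUE(false);  // Only reached if the fault did not fire.
//   FAULT_TEST_END()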

TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);

  FAULT_TEST_BEGIN()

  // Reading from buffer should fault.
  // Volatile prevents the compiler from eliminating the load by folding
  // buffer0_contents == *buffer0.
  volatile int* buffer0 = reinterpret_cast<int*>(buffer);
  int buffer0_contents = *buffer0;
  EXPECT_EQ(buffer0_contents, *buffer0);
  EXPECT_TRUE(false);

  FAULT_TEST_END()

  FreePages(buffer, PageAllocationGranularity());
}

// TODO(crbug.com/1291888): Understand why we can't read from Read-Execute
// pages on iOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_ReadExecutePages DISABLED_ReadExecutePages
#else
#define MAYBE_ReadExecutePages ReadExecutePages
#endif  // BUILDFLAG(IS_IOS)
TEST(PartitionAllocPageAllocatorTest, MAYBE_ReadExecutePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadExecute),
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  // Reading from buffer should succeed.
  int buffer0_contents = *buffer0;

  FAULT_TEST_BEGIN()

  // Writing to buffer should fault.
  *buffer0 = ~buffer0_contents;
  EXPECT_TRUE(false);

  FAULT_TEST_END()

  // Make sure no write occurred.
  EXPECT_EQ(buffer0_contents, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}

#endif  // BUILDFLAG(IS_POSIX)

#if defined(LINUX_NAME_REGION)
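// On Linux, PartitionAlloc can name its anonymous mappings (the
// "[anon:chromium]" entries checked below), presumably via
// prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...); as the comment further down
// notes, the prctl() may be compiled in yet unimplemented by the host kernel.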
TEST(PartitionAllocPageAllocatorTest, PageTagging) {
  size_t size = PageAllocationGranularity();
  uintptr_t buffer =
      AllocPages(size, PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 PageTag::kChromium);
  ASSERT_TRUE(buffer);

  auto is_region_named = [](uintptr_t start_address) {
    std::string proc_maps;
    EXPECT_TRUE(::base::debug::ReadProcMaps(&proc_maps));
    std::vector<::base::debug::MappedMemoryRegion> regions;
    EXPECT_TRUE(::base::debug::ParseProcMaps(proc_maps, &regions));

    bool found = false;
    for (const auto& region : regions) {
      if (region.start == start_address) {
        found = true;
        return "[anon:chromium]" == region.path;
      }
    }
    EXPECT_TRUE(found);
    return false;
  };

  bool before = is_region_named(buffer);
  DecommitAndZeroSystemPages(buffer, size);
  bool after = is_region_named(buffer);

#if BUILDFLAG(IS_ANDROID)
  EXPECT_TRUE(before) << "VMA tagging should always work on Android";
#endif
  // When not running on Android, the prctl() command may be defined in the
  // headers, but not be implemented by the host kernel.
  EXPECT_EQ(before, after);

  FreePages(buffer, size);
}
#endif  // defined(LINUX_NAME_REGION)

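// On platforms where DecommittedMemoryIsAlwaysZeroed() holds, memory that is
// decommitted and then recommitted must read back as zero.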
TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
  if (!DecommittedMemoryIsAlwaysZeroed()) {
    return;
  }

  size_t size = PageAllocationGranularity();
  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
                                PageAccessibilityConfiguration(
                                    PageAccessibilityConfiguration::kReadWrite),
                                PageTag::kChromium);
  ASSERT_TRUE(buffer);

  memset(reinterpret_cast<void*>(buffer), 42, size);

  DecommitSystemPages(buffer, size,
                      PageAccessibilityDisposition::kAllowKeepForPerf);
  RecommitSystemPages(buffer, size,
                      PageAccessibilityConfiguration(
                          PageAccessibilityConfiguration::kReadWrite),
                      PageAccessibilityDisposition::kAllowKeepForPerf);

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  FreePages(buffer, size);
}

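// DecommitAndZeroSystemPages() must make the pages inaccessible (checked via
// the fault handler on POSIX) and must hand back zeroed memory once access is
// restored.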
TEST(PartitionAllocPageAllocatorTest, DecommitAndZero) {
  size_t size = PageAllocationGranularity();
  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
                                PageAccessibilityConfiguration(
                                    PageAccessibilityConfiguration::kReadWrite),
                                PageTag::kChromium);
  ASSERT_TRUE(buffer);

  memset(reinterpret_cast<void*>(buffer), 42, size);

  DecommitAndZeroSystemPages(buffer, size);

// Test permission setting on POSIX, where we can set a trap handler.
#if BUILDFLAG(IS_POSIX)

  FAULT_TEST_BEGIN()

  // Reading from buffer should now fault.
  int* buffer0 = reinterpret_cast<int*>(buffer);
  int buffer0_contents = *buffer0;
  EXPECT_EQ(buffer0_contents, *buffer0);
  EXPECT_TRUE(false);

  FAULT_TEST_END()

#endif

  // Clients of the DecommitAndZero API (in particular, V8) currently just
  // call SetSystemPagesAccess to mark the region as accessible again, so we
  // use that here as well.
  SetSystemPagesAccess(buffer, size,
                       PageAccessibilityConfiguration(
                           PageAccessibilityConfiguration::kReadWrite));

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  FreePages(buffer, size);
}

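// GetTotalMappedSize() should grow by exactly |size| while a region is
// allocated (even once decommitted) and return to its previous value after
// the region is freed.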
TEST(PartitionAllocPageAllocatorTest, MappedPagesAccounting) {
  size_t size = PageAllocationGranularity();
  // Ask for a large alignment to make sure that trimming doesn't change the
  // accounting.
  size_t alignment = 128 * PageAllocationGranularity();
  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
                      alignment - PageAllocationGranularity()};

  size_t mapped_size_before = GetTotalMappedSize();

  for (size_t offset : offsets) {
    uintptr_t data = AllocPagesWithAlignOffset(
        0, size, alignment, offset,
        PageAccessibilityConfiguration(
            PageAccessibilityConfiguration::kInaccessible),
        PageTag::kChromium);
    ASSERT_TRUE(data);

    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());

    DecommitSystemPages(data, size,
                        PageAccessibilityDisposition::kAllowKeepForPerf);
    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());

    FreePages(data, size);
    EXPECT_EQ(mapped_size_before, GetTotalMappedSize());
  }
}

TEST(PartitionAllocPageAllocatorTest, AllocInaccessibleWillJitLater) {
  // Verify that kInaccessibleWillJitLater allows read/write, and read/execute
  // permissions to be set.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessibleWillJitLater),
                 PageTag::kChromium);
  EXPECT_TRUE(
      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
                              PageAccessibilityConfiguration(
                                  PageAccessibilityConfiguration::kReadWrite)));
  EXPECT_TRUE(TrySetSystemPagesAccess(
      buffer, PageAllocationGranularity(),
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadExecute)));
  FreePages(buffer, PageAllocationGranularity());
}

#if BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
// TODO(crbug.com/1452151): Fix test to GTEST_SKIP() if MAP_JIT is in-use,
// or to be run otherwise, since kReadWriteExecute is used in some other
// configurations.
#define MAYBE_AllocReadWriteExecute DISABLED_AllocReadWriteExecute
#else
#define MAYBE_AllocReadWriteExecute AllocReadWriteExecute
#endif  // BUILDFLAG(IS_IOS) || BUILDFLAG(IS_MAC)
TEST(PartitionAllocPageAllocatorTest, MAYBE_AllocReadWriteExecute) {
  // Verify that kReadWriteExecute is similarly functional.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWriteExecute),
                 PageTag::kChromium);
  EXPECT_TRUE(
      TrySetSystemPagesAccess(buffer, PageAllocationGranularity(),
                              PageAccessibilityConfiguration(
                                  PageAccessibilityConfiguration::kReadWrite)));
  EXPECT_TRUE(TrySetSystemPagesAccess(
      buffer, PageAllocationGranularity(),
      PageAccessibilityConfiguration(
          PageAccessibilityConfiguration::kReadExecute)));
  FreePages(buffer, PageAllocationGranularity());
}

}  // namespace partition_alloc::internal

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)